| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| string (81–54k chars) | int64 (0–721) | string (91–41.9k chars) | int64 (0–699) | int64 (0–1) |
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(
    highway: list, number_of_update: int, probability: float, max_speed: int
) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
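
# Hedged usage sketch (editor addition; the parameter values are illustrative
# assumptions, not prescribed by the module): a 100-cell circular highway with
# a car every 5 cells at speed 2, run for 20 Nagel-Schreckenberg updates.
if __name__ == "__main__":
    example_highway = construct_highway(number_of_cells=100, frequency=5, initial_speed=2)
    example_history = simulate(example_highway, number_of_update=20, probability=0.3, max_speed=5)
    print(len(example_history))  # 21 states: the initial highway plus one per update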
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the falling product u * (u - 1) * ... * (u - p + 1) used by the
    Newton forward-difference term of order p."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
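
# Hedged reference note (editor addition): the difference table and the final sum
# above implement the Newton forward-difference interpolation formula
#
#     f(x0 + u*h) ≈ y0 + u*Δy0 + u(u-1)/2! * Δ²y0 + u(u-1)(u-2)/3! * Δ³y0 + ...
#
# with h = x[1] - x[0] and u = (value - x[0]) / h; ucal(u, i) supplies the falling
# product u(u-1)...(u-i+1), so the scheme assumes equally spaced x values.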
# This file is typically autogenerated (e.g. by `make fix-copies`); it provides
# placeholder classes that raise a helpful error when optional backends are missing.
from ..utils import DummyObject, requires_backends


class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
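
# Hedged usage note (editor addition): these placeholders let the package import
# succeed without the optional backends installed; only instantiation fails, e.g.
#
#     pipe = OnnxStableDiffusionPipeline()
#     # -> ImportError naming "torch", "transformers" and "onnx" if any is missing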
def catalan(number: int) -> int:
    """
    Calculate the (number - 1)-th Catalan number via the recurrence
    C(n) = C(n - 1) * (4n - 2) / (n + 1).

    >>> catalan(1)
    1
    >>> catalan(5)
    14
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
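
# Hedged usage sketch (editor addition): the recurrence above yields the sequence
# 1, 1, 2, 5, 14, ..., i.e. catalan(n) is the (n - 1)-th Catalan number.
if __name__ == "__main__":
    print([catalan(n) for n in range(1, 6)])  # [1, 1, 2, 5, 14]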
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
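
# Hedged note (editor addition): because sys.modules[__name__] is swapped for a
# _LazyModule at import time, a runtime statement such as
#
#     from transformers.models.timesformer import TimesformerConfig
#
# triggers the actual submodule import only on first attribute access; the
# TYPE_CHECKING branch above exists so static type checkers see eager imports.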
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
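
# Hedged usage sketch (editor addition): installed as a console script, the single
# registered subcommand reports environment information for bug reports:
#
#     $ diffusers-cli env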
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
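
    # Hedged follow-up sketch (editor addition): a quick way to judge the fit is
    # the mean squared error of the flattened multi-step forecasts.
    mse = float(np.mean((pred - y_test) ** 2))
    print(f"test MSE: {mse:.6f}")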
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        init_latents = image

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
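
# Hedged usage sketch (editor addition; the checkpoint id and file names are
# assumptions, not specified by this file):
#
#     from PIL import Image
#     pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#     init_image = Image.open("input.png")
#     (images, noising_timestep) = pipe(image=init_image, strength=0.6, return_dict=False)
#     images[0].save("renoised_then_denoised.png")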
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}


class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        # merge the three sub-vocabularies into a single token-to-id view
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        # only lyrics are split into characters; artists and genres stay whole
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(
        self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False
    ) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics

    def _run_strip_accents(self, text):
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
        return " ".join(lyrics)

    def convert_to_tensors(
        self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]

            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )

        return inputs

    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
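
# Hedged usage sketch (editor addition; the checkpoint id is an assumption):
#
#     from transformers import JukeboxTokenizer
#     tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#     encoded = tokenizer("Alan Jackson", "Country Rock", "old town road")
#     # encoded["input_ids"] holds one tensor per prior level (v3, v2, v2)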
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
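
# Hedged check (editor addition): Project Euler 191 notes that exactly 43 of the
# 81 possible 4-day strings earn a prize, which this memoized count reproduces.
if __name__ == "__main__":
    assert solution(4) == 43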
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """
    Custom SentencePiece Unigram tokenizer with NMT, NFKC, whitespace and
    lower-casing normalization, using the pre-tokenization of SentencePiece.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
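
# A minimal usage sketch (not part of the original file): train on a tiny
# in-memory corpus and encode a sentence; the sample text and vocab size are
# illustrative only.
if __name__ == "__main__":
    corpus = ["The quick brown fox jumps over the lazy dog."] * 100
    spm_tokenizer = SentencePieceUnigramTokenizer()
    spm_tokenizer.train_from_iterator(iter(corpus), vocab_size=100, show_progress=False)
    print(spm_tokenizer.encode("the quick brown fox").tokens)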
| 688 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
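
# The sys.modules swap above defers the heavy torch imports until an attribute
# is first accessed. A stripped-down sketch of the same idea via PEP 562
# module-level __getattr__ (illustrative names, not the transformers
# implementation); it would live in a package's own __init__.py:
#
#     import importlib
#
#     _lazy_attrs = {"XmodModel": ".modeling_xmod"}
#
#     def __getattr__(name):
#         if name in _lazy_attrs:
#             submodule = importlib.import_module(_lazy_attrs[name], __name__)
#             return getattr(submodule, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")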
| 714 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return how many times ``term`` appears in ``document`` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing ``term``, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df), optionally with add-one smoothing of df."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine a term frequency with an inverse document frequency."""
    return round(tf * idf, 3)
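
# A small worked example (not in the original file): score the term "cat" in a
# three-document corpus, one document per line.
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog barked\nthe cat and the dog"
    tf = term_frequency("cat", "the cat sat")  # 1 occurrence
    df, n = document_frequency("cat", corpus)  # appears in 2 of 3 documents
    idf = inverse_document_frequency(df, n)  # round(log10(3 / 2), 3) == 0.176
    print(tf_idf(tf, idf))  # 0.176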
| 688 | 0 |
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    return (
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
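
# A minimal usage sketch (not in the original file): build a small encoder
# config and round-trip it through to_dict(); the sizes are illustrative.
#
#     config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6, num_attention_heads=20)
#     assert config.to_dict()["hidden_size"] == 320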
| 715 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 688 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
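
# A minimal usage sketch (not part of the original file); the checkpoint name
# is illustrative and is downloaded on first use:
#
#     from transformers import pipeline
#
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#     conversation = Conversation("Going to the movies tonight - any suggestions?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])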
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 688 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
| 717 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
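
# A minimal usage sketch (not part of the original file); the checkpoint name
# and inputs are illustrative, and a download is needed on first use:
#
#     from transformers import LayoutLMv2ImageProcessor, LayoutXLMTokenizerFast
#     from PIL import Image
#
#     processor = LayoutXLMProcessor(
#         LayoutLMv2ImageProcessor(apply_ocr=False),
#         LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base"),
#     )
#     encoding = processor(
#         Image.new("RGB", (224, 224), "white"),
#         text=["hello", "world"],
#         boxes=[[1, 2, 3, 4], [5, 6, 7, 8]],
#         return_tensors="pt",
#     )
#     print(encoding.keys())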
| 688 | 0 |
# Min-heap data structure
# with decrease-key functionality - in O(log(n)) time


class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """
    >>> r = Node("R", -1)
    >>> b = Node("B", 6)
    >>> a = Node("A", 3)
    >>> x = Node("X", 1)
    >>> e = Node("E", 4)
    >>> print(b)
    Node(B, 6)
    >>> my_min_heap = MinHeap([r, b, a, x, e])
    >>> my_min_heap.decrease_key(b, -17)
    >>> print(b)
    Node(B, -17)
    >>> my_min_heap["B"]
    -17
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
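
# A small extraction sketch (not in the original file): repeatedly removing
# the minimum yields the nodes in ascending order of ``val``, which is the
# typical use of a decrease-key heap inside e.g. Dijkstra's algorithm.
print("Extracting all nodes in ascending order of val:")
while not my_min_heap.is_empty():
    print(my_min_heap.remove())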
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if it's a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
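
# A tiny illustration (not in the original file) of the quote_plus mapping the
# manager uses to turn a URL's last path segment into a safe dummy file name:
#
#     >>> import urllib.parse
#     >>> urllib.parse.quote_plus("train.json?dl=1")
#     'train.json%3Fdl%3D1'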
| 688 | 0 |
SCREAMING_SNAKE_CASE__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> bytes:
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = F"""a bytes-like object is required, not \'{data.__class__.__name__}\'"""
raise TypeError(SCREAMING_SNAKE_CASE )
__lowercase = "".join(bin(SCREAMING_SNAKE_CASE )[2:].zfill(8 ) for byte in data )
__lowercase = len(SCREAMING_SNAKE_CASE ) % 6 != 0
if padding_needed:
# The padding that will be added later
__lowercase = b"=" * ((6 - len(SCREAMING_SNAKE_CASE ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(SCREAMING_SNAKE_CASE ) % 6)
else:
__lowercase = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(SCREAMING_SNAKE_CASE ) , 6 ) ).encode()
+ padding
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any ) -> bytes:
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = (
"argument should be a bytes-like object or ASCII string, "
F"""not \'{encoded_data.__class__.__name__}\'"""
)
raise TypeError(SCREAMING_SNAKE_CASE )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
try:
__lowercase = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
__lowercase = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(SCREAMING_SNAKE_CASE ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__lowercase = encoded_data[:-padding]
__lowercase = "".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__lowercase = "".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )
__lowercase = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(SCREAMING_SNAKE_CASE ) , 8 )
]
return bytes(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
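
# A minimal round-trip sketch (not in the original file): compare against the
# standard library's base64 module on a sample string.
if __name__ == "__main__":
    import base64 as std_base64

    sample = b"Algorithms"
    assert base64_encode(sample) == std_base64.b64encode(sample)
    assert base64_decode(base64_encode(sample)) == sample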
| 719 |
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size += abs(kernel_size % 2 - 1)  # round up to an odd size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
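
# A minimal sketch (not in the original file): run the filter on a synthetic
# noisy step edge with no OpenCV windows; sizes and variances are illustrative.
# Call _demo() manually to try it.
def _demo() -> None:
    rng = np.random.default_rng(0)
    step = np.concatenate([np.zeros((16, 8)), np.ones((16, 8))], axis=1)
    noisy = (step + rng.normal(0, 0.05, step.shape)).astype("float32")
    smoothed = bilateral_filter(noisy, 1.0, 0.1, 5)
    # the flat regions get smoother while the step at column 8 is preserved
    print("flat-region std before:", noisy[4:12, 2:6].std(), "after:", smoothed[4:12, 2:6].std())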
| 688 | 0 |
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Any = DownBlockaD # noqa F405
lowerCAmelCase__ : List[Any] = "down"
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Dict = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase__ : int = "down"
def a__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Dict = AttnDownBlockaD # noqa F405
lowerCAmelCase__ : List[str] = "down"
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Optional[int] = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ : Tuple = "down"
def a__ ( self : int ) -> str:
"""simple docstring"""
__lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common()
__lowercase = 32
return init_dict, inputs_dict
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : int = SimpleCrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ : Tuple = "down"
@property
def a__ ( self : str ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=SCREAMING_SNAKE_CASE_ )
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common()
__lowercase = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : List[str] = SkipDownBlockaD # noqa F405
lowerCAmelCase__ : List[Any] = "down"
@property
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=SCREAMING_SNAKE_CASE_ )
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Union[str, Any] = AttnSkipDownBlockaD # noqa F405
lowerCAmelCase__ : Any = "down"
@property
def a__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=SCREAMING_SNAKE_CASE_ )
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Union[str, Any] = DownEncoderBlockaD # noqa F405
lowerCAmelCase__ : Optional[Any] = "down"
@property
def a__ ( self : Any ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ )
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = {
'in_channels': 32,
'out_channels': 32,
}
__lowercase = self.dummy_input
return init_dict, inputs_dict
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : List[Any] = AttnDownEncoderBlockaD # noqa F405
lowerCAmelCase__ : List[str] = "down"
@property
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ )
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = {
'in_channels': 32,
'out_channels': 32,
}
__lowercase = self.dummy_input
return init_dict, inputs_dict
def a__ ( self : int ) -> int:
"""simple docstring"""
__lowercase = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Optional[Any] = UNetMidBlockaD # noqa F405
lowerCAmelCase__ : Tuple = "mid"
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {
'in_channels': 32,
'temb_channels': 1_28,
}
__lowercase = self.dummy_input
return init_dict, inputs_dict
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405
lowerCAmelCase__ : Optional[int] = "mid"
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common()
__lowercase = 32
return init_dict, inputs_dict
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Dict = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase__ : List[Any] = "mid"
@property
def a__ ( self : List[Any] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=SCREAMING_SNAKE_CASE_ )
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common()
__lowercase = 32
return init_dict, inputs_dict
def a__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : List[str] = UpBlockaD # noqa F405
lowerCAmelCase__ : Any = "up"
@property
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Tuple = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase__ : Any = "up"
@property
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Tuple = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ : int = "up"
@property
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common()
__lowercase = 32
return init_dict, inputs_dict
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Dict = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ : List[Any] = "up"
@property
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ , include_encoder_hidden_states=SCREAMING_SNAKE_CASE_ )
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common()
__lowercase = 32
return init_dict, inputs_dict
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Tuple = AttnUpBlockaD # noqa F405
lowerCAmelCase__ : List[Any] = "up"
@property
def a__ ( self : Any ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : List[Any] = SkipUpBlockaD # noqa F405
lowerCAmelCase__ : Any = "up"
@property
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def a__ ( self : Any ) -> str:
"""simple docstring"""
__lowercase = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Dict = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase__ : Optional[Any] = "up"
@property
def a__ ( self : List[Any] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : List[str] = UpDecoderBlockaD # noqa F405
lowerCAmelCase__ : List[str] = "up"
@property
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
__lowercase = {'in_channels': 32, 'out_channels': 32}
__lowercase = self.dummy_input
return init_dict, inputs_dict
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137]
super().test_output(SCREAMING_SNAKE_CASE_ )
class A__ ( __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ : Tuple = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase__ : Any = "up"
@property
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ )
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = {'in_channels': 32, 'out_channels': 32}
__lowercase = self.dummy_input
return init_dict, inputs_dict
def a__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568]
super().test_output(SCREAMING_SNAKE_CASE_ ) | 720 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : Dict=30 , _UpperCAmelCase : Tuple=4_00 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Any=True , ) -> Dict:
"""simple docstring"""
__lowercase = size if size is not None else {'height': 18, 'width': 18}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = apply_ocr
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessingTester(self )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'apply_ocr' ) )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
pass
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _UpperCAmelCase )
self.assertIsInstance(encoding.boxes , _UpperCAmelCase )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowercase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__lowercase = Image.open(ds[0]['file'] ).convert('RGB' )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowercase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__lowercase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase )
self.assertListEqual(encoding.boxes , _UpperCAmelCase )
# with apply_OCR = False
__lowercase = LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
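# Minimal usage sketch (hypothetical local scan; OCR requires pytesseract):
#   processor = LayoutLMvaImageProcessor(apply_ocr=True)
#   encoding = processor(Image.open('scan.png').convert('RGB'), return_tensors='pt')
#   encoding.pixel_values.shape  # -> (1, 3, 224, 224); encoding.words / encoding.boxes hold the OCR output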
| 688 | 0 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = 'EncodecFeatureExtractor'
    tokenizer_class = ('T5Tokenizer', 'T5TokenizerFast')
    def __init__(self, feature_extractor, tokenizer) -> None:
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # While inside a target-processor context, forward everything to the feature extractor
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs['input_values'] = audio_inputs['input_values']
            if 'padding_mask' in audio_inputs:
                inputs['padding_mask'] = audio_inputs['padding_mask']
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop('audio', None)
        padding_mask = kwargs.pop('padding_mask', None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the
        # **non-padding** token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), 'constant', constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
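# Sketch of the decode path (assumes a pretrained checkpoint such as facebook/musicgen-small):
#   processor = MusicgenProcessor.from_pretrained('facebook/musicgen-small')
#   audios = processor.batch_decode(audio_values=generated, padding_mask=inputs['padding_mask'])
#   # -> list of (channels, num_samples) arrays with the padded samples stripped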
| 721 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
    """google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "umt5"
lowerCAmelCase__ : Tuple = ["past_key_values"]
def __init__( self : str , _UpperCAmelCase : int=25_01_12 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : List[str]=64 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : str=8 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : str=32 , _UpperCAmelCase : Optional[int]=1_28 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : str=1e-6 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : str="gated-gelu" , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Tuple="T5Tokenizer" , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[str]=0 , _UpperCAmelCase : int=1 , _UpperCAmelCase : List[str]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
is_encoder_decoder=_UpperCAmelCase , tokenizer_class=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_heads
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
__lowercase = self.feed_forward_proj.split('-' )
__lowercase = act_info[-1]
__lowercase = act_info[0] == 'gated'
if len(_UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(_UpperCAmelCase ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. """
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__lowercase = 'gelu_new'
@property
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.d_model
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.num_heads
@property
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.num_layers
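# Example (hypothetical sizes), using the upstream class name UMT5Config:
#   config = UMT5Config(vocab_size=1000, d_model=64, d_ff=128, num_layers=2, num_heads=4)
#   config.hidden_size  # aliases d_model through the property above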
class A__ ( lowerCAmelCase__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def a__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__lowercase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__lowercase = 'past_encoder_sequence + sequence'
__lowercase = {0: 'batch'}
__lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return 13
@property
def a__ ( self : Dict ) -> float:
"""simple docstring"""
return 5e-4
| 688 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A__ ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = StableDiffusionPanoramaPipeline
lowerCAmelCase__ : int = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ : int = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__lowercase = DDIMScheduler()
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__lowercase = CLIPTextModel(UpperCAmelCase__ )
__lowercase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__lowercase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a__ ( self : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any=0 ) -> Optional[Any]:
"""simple docstring"""
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs(UpperCAmelCase__ )
__lowercase = sd_pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowercase = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self : Dict ) -> str:
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs(UpperCAmelCase__ )
__lowercase = '''french fries'''
__lowercase = sd_pipe(**UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowercase = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self : Dict ) -> int:
"""simple docstring"""
__lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs(UpperCAmelCase__ )
__lowercase = sd_pipe(**UpperCAmelCase__ , view_batch_size=2 )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowercase = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' )
__lowercase = StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs(UpperCAmelCase__ )
__lowercase = sd_pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowercase = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , skip_prk_steps=UpperCAmelCase__ )
__lowercase = StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs(UpperCAmelCase__ )
__lowercase = sd_pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowercase = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def a__ ( self : int ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : int , _UpperCAmelCase : int=0 ) -> Optional[Any]:
"""simple docstring"""
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase = '''stabilityai/stable-diffusion-2-base'''
__lowercase = DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='scheduler' )
__lowercase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
__lowercase = self.get_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__lowercase = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' , safety_checker=UpperCAmelCase__ )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
__lowercase = self.get_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__lowercase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = 0
def callback_fn(_UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : torch.FloatTensor ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
__lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
__lowercase = False
__lowercase = '''stabilityai/stable-diffusion-2-base'''
__lowercase = DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='scheduler' )
__lowercase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
__lowercase = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
__lowercase = self.get_inputs()
pipe(**UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowercase = '''stabilityai/stable-diffusion-2-base'''
__lowercase = DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='scheduler' )
__lowercase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
__lowercase = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowercase = self.get_inputs()
__lowercase = pipe(**UpperCAmelCase__ )
__lowercase = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
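# Minimal usage sketch mirroring the slow tests above (downloads several GB of weights):
#   scheduler = DDIMScheduler.from_pretrained('stabilityai/stable-diffusion-2-base', subfolder='scheduler')
#   pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#       'stabilityai/stable-diffusion-2-base', scheduler=scheduler).to('cuda')
#   image = pipe('a photo of the dolomites').images[0]  # wide panorama, e.g. 512x2048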
| 700 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
    """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "layoutlmv3"
def __init__( self : Optional[Any] , _UpperCAmelCase : int=5_02_65 , _UpperCAmelCase : Union[str, Any]=7_68 , _UpperCAmelCase : str=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[str]=30_72 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Tuple=1_28 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : int=64 , _UpperCAmelCase : List[str]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=2_24 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = max_ad_position_embeddings
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = has_relative_attention_bias
__lowercase = rel_pos_bins
__lowercase = max_rel_pos
__lowercase = has_spatial_attention_bias
__lowercase = rel_ad_pos_bins
__lowercase = max_rel_ad_pos
__lowercase = text_embed
__lowercase = visual_embed
__lowercase = input_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = classifier_dropout
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = version.parse("1.12" )
@property
def a__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def a__ ( self : Any ) -> float:
"""simple docstring"""
return 1e-5
@property
def a__ ( self : Dict ) -> int:
"""simple docstring"""
return 12
def a__ ( self : Tuple , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
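# Sketch of the export path (upstream names; `processor` would be a LayoutLMv3Processor):
#   onnx_config = LayoutLMv3OnnxConfig(config, task='question-answering')
#   dummy = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)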
| 688 | 0 |
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
    def __repr__(self) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
    def __str__(self) -> str:
        value = str(self.value) + ' '
        left = str(self.left or '')
        right = str(self.right or '')
return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None: # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right): # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
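# Example: repeated inserts followed by an in-order walk (prints the values sorted):
#   root = None
#   for v in (5, 3, 8, 1):
#       root = insert(root, v)
#   inorder(root)  # 1,3,5,8,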
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == '+':
            root = insert(root, int(arg[1:]))
        elif arg[0] == '-':
            root = erase(root, int(arg[1:]))
        else:
            print('Unknown command')
    return root
def main() -> None:
    root = None
    print(
        'enter numbers to create a tree, +value to add a value to the treap, '
        '-value to erase all nodes with that value. \'q\' to quit. ' )
    args = input()
    while args != 'q':
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('goodbye!')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 701 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    # ITU-R BT.601 luma weights
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    # Threshold at mid-gray: pixels above 127 become foreground (True)
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
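# Example: dilating a single set pixel with the cross-shaped kernel below grows it into a plus sign:
#   tiny = np.zeros((3, 3)); tiny[1, 1] = 1
#   dilation(tiny, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))  # middle row and column light up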
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('RGB')
    pil_img.save('result_dilation.png')
| 688 | 0 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    # Naive evaluation: sum of c_i * x**i over all coefficients
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    # Horner's method: fold from the highest coefficient down
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
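# Horner needs only n multiplies and n adds for a degree-n polynomial.
# Worked check for poly=(0.0, 0.0, 5.0, 9.3, 7.0) and x=10:
#   (((7.0*10 + 9.3)*10 + 5.0)*10 + 0.0)*10 + 0.0 = 79800.0, matching evaluate_poly.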
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 702 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = ["pixel_values"]
def __init__( self : Tuple , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 2_55 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : bool = True , **_UpperCAmelCase : str , ) -> None:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = size if size is not None else {'height': 3_84, 'width': 3_84}
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase = do_convert_rgb
def a__ ( self : int , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : int , ) -> np.ndarray:
"""simple docstring"""
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
__lowercase = (size['height'], size['width'])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Any , ) -> str:
"""simple docstring"""
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : ImageInput , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Dict[str, int]] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[float] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : int , ) -> PIL.Image.Image:
"""simple docstring"""
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = resample if resample is not None else self.resample
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__lowercase = BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCAmelCase )
return encoded_outputs
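# Minimal usage sketch (this matches the upstream BlipImageProcessor; hypothetical local file):
#   processor = BlipImageProcessor()
#   batch = processor(images=PIL.Image.open('photo.jpg'), return_tensors='pt')
#   batch['pixel_values'].shape  # -> (1, 3, 384, 384) after resizing, rescaling and CLIP normalization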
| 688 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
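# Each guarded block below registers extra symbols in _import_structure only when its optional
# backend (sentencepiece, tokenizers, torch) is importable, so the base import stays lightweight.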
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 688 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class A__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs ) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space', add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self, *args, **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get('is_split_into_words', False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ' pretokenized inputs.' )
        return super()._batch_encode_plus(*args, **kwargs )
    def _encode_plus( self, *args, **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get('is_split_into_words', False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ' pretokenized inputs.' )
        return super()._encode_plus(*args, **kwargs )
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self, conversation: "Conversation" ) -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 704 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path ):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664_694
        config.cell_selection_preference = 0.207_951
        config.huber_loss_delta = 0.121_194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_352_513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4_519
        config.cell_selection_preference = 0.903_421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763_141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(F"""Task {task} not supported.""" )
    print(F"""Building PyTorch model from configuration: {config}""" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(F"""Save tokenizer files to {pytorch_dump_path}""" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt', model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print('Used relative position embeddings:', model.config.reset_position_index_per_cell )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 688 | 0 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export( model, model_args, output_path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ) -> None:
    output_path.parent.mkdir(parents=True, exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models( model_path: str, output_path: str, opset: int, fp16: bool = False ) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        device = 'cpu'
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae' )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, 25, 25 ).to(device=device, dtype=dtype ),
            False,
        ), output_path=output_path / 'vae_decoder' / 'model.onnx', ordered_input_names=['latent_sample', 'return_dict'], output_names=['sample'], dynamic_axes={
        'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
    }, opset=opset, )
    del vae_decoder
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("""SD: Done: ONNX""")
| 705 |
def or_gate( input_1: int, input_2: int ) -> int:
    return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate( ) -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
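# A small extension sketch (an addition for illustration): the two-input gate
# composes into an n-input OR with functools.reduce.
from functools import reduce


def or_gate_n(*inputs: int) -> int:
    """n-input OR built from pairwise or_gate calls."""
    return reduce(or_gate, inputs, 0)


assert or_gate_n(0, 0, 0) == 0
assert or_gate_n(0, 0, 1) == 1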
| 688 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text: str ) -> None:
    single_char_strings, two_char_strings = analyze_text(text )
    my_alphas = list(' ' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob ) # entropy formula.
    # print entropy
    print(F"""{round(-1 * my_fir_sum ):.1f}""" )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(F"""{round(-1 * my_sec_sum ):.1f}""" )
    # print the difference between them
    print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def analyze_text( text: str ) -> tuple[dict, dict]:
    single_char_strings = Counter() # type: ignore
    two_char_strings = Counter() # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
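# A quick non-interactive demonstration (an addition; any short lowercase ASCII
# text works -- the commented-out sample text above would do as well).
if __name__ == "__main__":
    calculate_prob("the quick brown fox jumps over the lazy dog")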
| 706 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """Hello, World!"""
SCREAMING_SNAKE_CASE__ = """en_XX"""
def convert_xmod_checkpoint_to_pytorch( xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool ) -> None:
    data_dir = Path('data_bin' )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ), checkpoint_file=Path(xmod_checkpoint_path ).name, _name='xmod_base', arch='xmod_base', task='multilingual_masked_lm', data_name_or_path=str(data_dir ), bpe='sentencepiece', sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / 'sentencepiece.bpe.model' ), src_dict=str(data_dir / 'dict.txt' ), )
    xmod.eval() # disable dropout
    print(xmod_checkpoint_path )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, 'bottleneck', 2 ), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages, )
    if classification_head:
        config.num_labels = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our X-MOD config:', config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase = xmod_sent_encoder.embed_tokens.weight
__lowercase = xmod_sent_encoder.embed_positions.weight
__lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowercase = xmod_sent_encoder.layernorm_embedding.weight
__lowercase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase = model.roberta.encoder.layer[i]
__lowercase = xmod_sent_encoder.layers[i]
# self attention
__lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowercase = xmod_layer.self_attn.q_proj.weight
__lowercase = xmod_layer.self_attn.q_proj.bias
__lowercase = xmod_layer.self_attn.k_proj.weight
__lowercase = xmod_layer.self_attn.k_proj.bias
__lowercase = xmod_layer.self_attn.v_proj.weight
__lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowercase = xmod_layer.self_attn.out_proj.weight
__lowercase = xmod_layer.self_attn.out_proj.bias
__lowercase = xmod_layer.self_attn_layer_norm.weight
__lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowercase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
# output
__lowercase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
__lowercase = xmod_layer.final_layer_norm.weight
__lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowercase = xmod_layer.adapter_layer_norm.weight
__lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowercase = bert_output.adapter_modules[lang_code]
__lowercase = xmod_layer.adapter_modules[lang_code]
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowercase = xmod_sent_encoder.layer_norm.weight
__lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].dense.weight
__lowercase = xmod.model.classification_heads['mnli'].dense.bias
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight
__lowercase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowercase = xmod.model.encoder.lm_head.dense.weight
__lowercase = xmod.model.encoder.lm_head.dense.bias
__lowercase = xmod.model.encoder.lm_head.layer_norm.weight
__lowercase = xmod.model.encoder.lm_head.layer_norm.bias
__lowercase = xmod.model.encoder.lm_head.weight
__lowercase = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads['mnli'](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape, their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1E-3 )
    print('Do both models output the same tensors?', '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )
    Path(pytorch_dump_folder_path ).mkdir(parents=True, exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 688 | 0 |
class OverFlowError( Exception ):
    pass
class UnderFlowError( Exception ):
    pass
class FixedPriorityQueue :
    def __init__( self ) -> None:
        """simple docstring"""
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue( self, priority: int, data: int ) -> None:
        """simple docstring"""
        try:
            if len(self.queues[priority] ) >= 1_00:
                raise OverFlowError('Maximum queue size is 100' )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2' )
    def dequeue( self ) -> int:
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self : str ) -> str:
"""simple docstring"""
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues ) )
class ElementPriorityQueue :
    def __init__( self ) -> None:
        """simple docstring"""
        self.queue = []
    def enqueue( self, data: int ) -> None:
        """simple docstring"""
        if len(self.queue ) == 1_00:
            raise OverFlowError('Maximum queue size is 100' )
        self.queue.append(data )
    def dequeue( self ) -> int:
        """simple docstring"""
        if not self.queue:
            raise UnderFlowError('The queue is empty' )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
def __str__( self : Optional[Any] ) -> str:
"""simple docstring"""
return str(self.queue )
def fixed_priority_queue( ) -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue( ) -> None:
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
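# For comparison (an addition, not part of the original module): the same
# element-priority behaviour is available from the standard library's heapq,
# which pops the minimum in O(log n) instead of the O(n) min()/remove() above.
if __name__ == "__main__":
    import heapq

    heap: list = []
    for item in (10, 70, 100, 1, 5, 7, 4, 64, 128):
        heapq.heappush(heap, item)
    print([heapq.heappop(heap) for _ in range(3)])  # -> [1, 4, 5]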
| 707 |
from __future__ import annotations
import math
def ucal( u: float, p: int ) -> float:
    temp = u
    for i in range(1, p ):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input('enter the numbers of values: ' ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
    summ = 0
    print('enter the values of parameters in a list: ' )
    x = list(map(int, input().split() ) )
    print('enter the values of corresponding parameters: ' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('enter the value to interpolate: ' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n ):
        summ += (ucal(u, i ) * y[0][i]) / math.factorial(i )
    print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
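# A minimal non-interactive check of the same forward-difference scheme (an
# addition for illustration; data chosen so the answer is known exactly).
# For f(x) = x**2 sampled at x = 0, 1, 2, 3, interpolating at 1.5 must give 2.25.
def newton_forward(x: list[int], y0: list[float], value: float) -> float:
    n = len(x)
    y = [[0.0] * n for _ in range(n)]
    for i in range(n):
        y[i][0] = y0[i]
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    return summ


assert abs(newton_forward([0, 1, 2, 3], [0.0, 1.0, 4.0, 9.0], 1.5) - 2.25) < 1e-9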
| 688 | 0 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class A__ ( DiffusionPipeline ):
    _optional_components = ['melgan']
    def __init__( self, notes_encoder, continuous_encoder, decoder, scheduler, melgan, ) -> None:
        """simple docstring"""
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5 ) # Matches MelGAN training.
        self.max_value = 4.0 # Largest value for most examples
        self.n_dims = 1_28
        self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    def scale_features( self, features, output_range=(-1.0, 1.0), clip=False ):
        """simple docstring"""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features( self, outputs, input_range=(-1.0, 1.0), clip=False ):
        """simple docstring"""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode( self, input_tokens, continuous_inputs, continuous_mask ):
        """simple docstring"""
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self, encodings_and_masks, input_tokens, noise_time ):
        """simple docstring"""
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps )
        return logits
@torch.no_grad()
    def __call__( self, input_tokens, generator=None, num_inference_steps: int = 1_00, return_dict: bool = True, output_type: str = "numpy", callback=None, callback_steps: int = 1, ) -> Union[AudioPipelineOutput, Tuple]:
        """simple docstring"""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps )}.""" )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32 )
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32 )
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device )
        for i, encoder_input_tokens in enumerate(input_tokens ):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device, dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True )
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ), continuous_inputs=encoder_continuous_inputs, continuous_mask=encoder_continuous_mask, )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape, generator=generator, device=self.device, dtype=self.decoder.dtype, )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks, input_tokens=x, noise_time=t / self.scheduler.config.num_train_timesteps, )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator ).prev_sample
            mel = self.scale_to_features(x, input_range=[-1.0, 1.0] )
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel )
            logger.info('Generated segment', i )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )
if output_type == "numpy":
__lowercase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__lowercase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_UpperCAmelCase )
| 708 |
def catalan_number( number: int ) -> int:
    if not isinstance(number, int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = F"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    current_number = 1
    for i in range(1, number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
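# A minimal cross-check (an addition, not part of the original module): the
# recurrence above should agree with the closed form C(k) = comb(2k, k) / (k + 1).
# catalan_number is 1-indexed here, so catalan_number(n) corresponds to k = n - 1.
if __name__ == "__main__":
    import math

    for n in range(1, 10):
        assert catalan_number(n) == math.comb(2 * (n - 1), n - 1) // n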
| 688 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main( ) -> None:
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
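# Example invocation (an illustrative note, not part of the module): with the
# package installed, the `diffusers-cli` console entry point runs this `main`,
# e.g. `diffusers-cli env`, which dispatches to the EnvironmentCommand
# registered above and prints environment/debug information.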
| 688 | 0 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key( k: str ) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name )
    return k
def convert_pegasus( tf_weights: dict, cfg_updates: dict ) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
__lowercase = tf.train.list_variables(SCREAMING_SNAKE_CASE )
__lowercase = {}
__lowercase = ['Adafactor', 'global_step']
for name, shape in tqdm(SCREAMING_SNAKE_CASE , desc='converting tf checkpoint to dict' ):
__lowercase = any(pat in name for pat in ignore_name )
if skip_key:
continue
__lowercase = tf.train.load_variable(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch( ckpt_path: str, save_dir: str ) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[F"""summarization_{dataset}"""]['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus', model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[F"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight' )
    sd.pop('model.encoder.embed_positions.weight' )
    torch.save(sd, Path(save_dir ) / 'pytorch_model.bin' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 710 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp( self ) -> None:
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts( self, tokenizer ):
        """simple docstring"""
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer( self ) -> None:
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ), [9, 6, 7, 12, 10, 11] )
    def test_chinese( self ) -> None:
        """simple docstring"""
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
    def test_basic_tokenizer_lower( self ) -> None:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_false( self ) -> None:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
    def test_basic_tokenizer_lower_strip_accents_true( self ) -> None:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_default( self ) -> None:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_no_lower( self ) -> None:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ) -> None:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ) -> None:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_respects_never_split_tokens( self ) -> None:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
    def test_wordpiece_tokenizer( self ) -> None:
        """simple docstring"""
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
    def test_prepare_batch( self ) -> None:
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
        batch = tokenizer(src_text, padding=True, return_tensors='pt' )
        self.assertIsInstance(batch, BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens, result )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
    def test_is_whitespace( self ) -> None:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control( self ) -> None:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation( self ) -> None:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
    def test_sequence_builders( self ) -> None:
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
        text = tokenizer.encode('sequence builders', add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
| 688 | 0 |
import requests
from bs4 import BeautifulSoup
def get_citation( base_url: str, params: dict ) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params ).content, 'html.parser' )
    div = soup.find('div', attrs={'class': 'gs_ri'} )
    anchors = div.find('div', attrs={'class': 'gs_fl'} ).find_all('a' )
    return anchors[2].get_text()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
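# A small robustness sketch (an addition): scholar.google.com often blocks the
# default requests user agent, so sending a browser-like User-Agent header is a
# common workaround -- the header value below is only an example.
#
# headers = {"User-Agent": "Mozilla/5.0"}
# soup = BeautifulSoup(
#     requests.get(url, params=params, headers=headers).content, "html.parser"
# )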
| 711 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
"""jukebox""": 512,
}
class A__ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self, artists_file, genres_file, lyrics_file, version=["v3", "v2", "v2"], max_n_lyric_tokens=5_12, n_genres=5, unk_token="<|endoftext|>", **kwargs, ) -> None:
        """simple docstring"""
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False ) if isinstance(unk_token, str ) else unk_token
        super().__init__(
            unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs, )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding='utf-8' ) as vocab_handle:
            self.artists_encoder = json.load(vocab_handle )
        with open(genres_file, encoding='utf-8' ) as vocab_handle:
            self.genres_encoder = json.load(vocab_handle )
        with open(lyrics_file, encoding='utf-8' ) as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle )
        oov = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder ) == 79:
            oov = oov.replace(R'\-\'', R'\-+\'' )
        self.out_of_vocab = regex.compile(oov )
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
    def vocab_size( self ) -> int:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
    def get_vocab( self ) -> Dict[str, Any]:
"""simple docstring"""
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
    def _convert_token_to_id( self, list_artists, list_genres, list_lyrics ):
        """simple docstring"""
        artists_id = [self.artists_encoder.get(artist, 0 ) for artist in list_artists]
        for genres in range(len(list_genres ) ):
            list_genres[genres] = [self.genres_encoder.get(genre, 0 ) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
        lyric_ids = [[self.lyrics_encoder.get(character, 0 ) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize( self, lyrics: str ) -> List[str]:
        """simple docstring"""
        return list(lyrics )
    def tokenize( self, artist, genre, lyrics, **kwargs ):
        """simple docstring"""
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics )
        lyrics = self._tokenize(lyrics )
        return artist, genre, lyrics
    def prepare_for_tokenization( self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
        """simple docstring"""
        for idx in range(len(self.version ) ):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx] ) + '.v2'
                genres[idx] = [
                    self._normalize(genre ) + '.v2' for genre in genres[idx].split('_' )
                ] # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
            vocab = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab ) )}
            self.vocab['<unk>'] = 0
            self.n_vocab = len(vocab ) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ''
        else:
            self.out_of_vocab = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
        lyrics = self._run_strip_accents(lyrics )
        lyrics = lyrics.replace('\\', '\n' )
        lyrics = self.out_of_vocab.sub('', lyrics ), [], []
        return artists, genres, lyrics
def a__ ( self : Tuple , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = unicodedata.normalize('NFD' , _UpperCAmelCase )
__lowercase = []
for char in text:
__lowercase = unicodedata.category(_UpperCAmelCase )
if cat == "Mn":
continue
output.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = (
[chr(_UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
__lowercase = frozenset(_UpperCAmelCase )
__lowercase = re.compile(R'_+' )
__lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] )
__lowercase = pattern.sub('_' , _UpperCAmelCase ).strip('_' )
return text
def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return " ".join(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = False ) -> int:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = TensorType(_UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
__lowercase = tf.constant
__lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
__lowercase = torch.tensor
__lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
__lowercase = jnp.array
__lowercase = _is_jax
else:
__lowercase = np.asarray
__lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__lowercase = [inputs]
if not is_tensor(_UpperCAmelCase ):
__lowercase = as_tensor(_UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int="" , _UpperCAmelCase : Tuple="pt" ) -> BatchEncoding:
"""simple docstring"""
__lowercase = [0, 0, 0]
__lowercase = [artist] * len(self.version )
__lowercase = [genres] * len(self.version )
__lowercase , __lowercase , __lowercase = self.tokenize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase , __lowercase = self._convert_token_to_id(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = [-INFINITY] * len(full_tokens[-1] )
__lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def a__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.artists_decoder.get(_UpperCAmelCase )
__lowercase = [self.genres_decoder.get(_UpperCAmelCase ) for genre in genres_index]
__lowercase = [self.lyrics_decoder.get(_UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
| 688 | 0 |
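# --- Editor's note: a self-contained sketch of the `_normalize` logic above ---
# (strip accents via NFD, keep only [a-z0-9.], collapse '_' runs). The artist
# string is an arbitrary example, not data from the dataset row.
import re
import unicodedata

def normalize_name(text: str) -> str:
    text = "".join(
        c for c in unicodedata.normalize("NFD", text)
        if unicodedata.category(c) != "Mn"  # drop combining accent marks
    )
    accepted = set("abcdefghijklmnopqrstuvwxyz0123456789.")
    text = "".join(c if c in accepted else "_" for c in text.lower())
    return re.sub(r"_+", "_", text).strip("_")

print(normalize_name("Beyoncé / Jay-Z"))  # -> beyonce_jay_z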
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str ) -> float:
__lowercase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__lowerCAmelCase )] )
__lowercase = np.array(__lowerCAmelCase )
__lowercase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __lowerCAmelCase ) ) , x.transpose() ) , __lowerCAmelCase )
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )  # y_hat = b0 + b1*date + b2*match
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] ) -> float:
__lowercase = (1, 2, 1)
__lowercase = (1, 1, 0, 7)
__lowercase = SARIMAX(
__lowerCAmelCase , exog=__lowerCAmelCase , order=__lowerCAmelCase , seasonal_order=__lowerCAmelCase )
__lowercase = model.fit(disp=__lowerCAmelCase , maxiter=600 , method='nm' )
__lowercase = model_fit.predict(1 , len(__lowerCAmelCase ) , exog=[test_match] )
return result[0]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] ) -> float:
__lowercase = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__lowerCAmelCase , __lowerCAmelCase )
__lowercase = regressor.predict(__lowerCAmelCase )
return y_pred[0]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> float:
train_user.sort()
    __lowercase = np.percentile(train_user , 25 )  # first quartile q1
    __lowercase = np.percentile(train_user , 75 )  # third quartile q3
    __lowercase = q3 - q1  # interquartile range (the original collapsed both quartiles into one name)
    __lowercase = q1 - (iqr * 0.1)
    return low_lim
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple ) -> bool:
__lowercase = 0
__lowercase = 0
for i in list_vote:
if i > actual_result:
__lowercase = not_safe + 1
else:
if abs(abs(__lowerCAmelCase ) - abs(__lowerCAmelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
SCREAMING_SNAKE_CASE__ = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
SCREAMING_SNAKE_CASE__ = pd.DataFrame(
data_input, columns=["""total_user""", """total_even""", """days"""]
)
SCREAMING_SNAKE_CASE__ = Normalizer().fit_transform(data_input_df.values)
# split data
SCREAMING_SNAKE_CASE__ = normalize_df[:, 2].tolist()
SCREAMING_SNAKE_CASE__ = normalize_df[:, 0].tolist()
SCREAMING_SNAKE_CASE__ = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
SCREAMING_SNAKE_CASE__ = normalize_df[:, [1, 2]].tolist()
SCREAMING_SNAKE_CASE__ = x[: len(x) - 1]
SCREAMING_SNAKE_CASE__ = x[len(x) - 1 :]
# for linear regression & sarimax
SCREAMING_SNAKE_CASE__ = total_date[: len(total_date) - 1]
SCREAMING_SNAKE_CASE__ = total_user[: len(total_user) - 1]
SCREAMING_SNAKE_CASE__ = total_match[: len(total_match) - 1]
SCREAMING_SNAKE_CASE__ = total_date[len(total_date) - 1 :]
SCREAMING_SNAKE_CASE__ = total_user[len(total_user) - 1 :]
SCREAMING_SNAKE_CASE__ = total_match[len(total_match) - 1 :]
# voting system with forecasting
SCREAMING_SNAKE_CASE__ = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
SCREAMING_SNAKE_CASE__ = "" if data_safety_checker(res_vote, tst_user) else "not "
print("""Today's data is {not_str}safe.""")
| 712 |
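# --- Editor's note: a minimal runnable sketch of the normal-equation fit used
# by the linear-regression helper above, beta = (X^T X)^{-1} X^T y. The toy
# numbers are hypothetical and only illustrate the call pattern.
import numpy as np

x = np.array([[1, 1, 2], [1, 2, 2], [1, 3, 3], [1, 4, 3]])  # [bias, date, match]
y = np.array([5.0, 7.0, 9.0, 11.0])
beta = np.linalg.inv(x.T @ x) @ x.T @ y
y_hat = beta[0] + 5 * beta[1] + 4 * beta[2]  # predict day 5 with match=4
print(round(float(abs(y_hat)), 3))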
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[Any] = True
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = AlbertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : int ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = AlbertModel.from_pretrained('albert-base-v2' )
__lowercase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
| 688 | 0 |
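# --- Editor's note: a tiny end-to-end run of the model exercised by the tester
# above. The hyperparameters are small arbitrary values (mirroring the tester
# defaults), not a released checkpoint; requires `torch` and `transformers`.
import torch
from transformers import AlbertConfig, AlbertModel

config = AlbertConfig(
    vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=2,
    num_attention_heads=6, intermediate_size=37, max_position_embeddings=512,
)
model = AlbertModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (2, 7))  # (batch, seq_len)
with torch.no_grad():
    out = model(input_ids)
assert out.last_hidden_state.shape == (2, 7, config.hidden_size)
assert out.pooler_output.shape == (2, config.hidden_size)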
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
SCREAMING_SNAKE_CASE__ = True
except ImportError:
SCREAMING_SNAKE_CASE__ = False
try:
from torch.hub import _get_torch_home
SCREAMING_SNAKE_CASE__ = _get_torch_home()
except ImportError:
SCREAMING_SNAKE_CASE__ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
SCREAMING_SNAKE_CASE__ = os.path.join(torch_cache_home, """transformers""")
SCREAMING_SNAKE_CASE__ = """https://cdn.huggingface.co"""
SCREAMING_SNAKE_CASE__ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
SCREAMING_SNAKE_CASE__ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
SCREAMING_SNAKE_CASE__ = os.path.join(PATH, """config.yaml""")
SCREAMING_SNAKE_CASE__ = os.path.join(PATH, """attributes.txt""")
SCREAMING_SNAKE_CASE__ = os.path.join(PATH, """objects.txt""")
SCREAMING_SNAKE_CASE__ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
SCREAMING_SNAKE_CASE__ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
SCREAMING_SNAKE_CASE__ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
SCREAMING_SNAKE_CASE__ = """pytorch_model.bin"""
SCREAMING_SNAKE_CASE__ = """config.yaml"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int=OBJECTS , SCREAMING_SNAKE_CASE : str=ATTRIBUTES ) -> Any:
__lowercase = []
with open(a_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(',' )[0].lower().strip() )
__lowercase = []
with open(a_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(',' )[0].lower().strip() )
return vg_classes, vg_attrs
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
__lowercase = OrderedDict()
with open(a_ , 'rb' ) as f:
__lowercase = pkl.load(a_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__lowercase = ckp.pop(a_ )
if isinstance(a_ , np.ndarray ):
__lowercase = torch.tensor(a_ )
else:
            assert isinstance(a_ , torch.Tensor ), type(a_ )
__lowercase = v
return r
class A__ :
lowerCAmelCase__ : Optional[int] = {}
def __init__( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] = "root" , _UpperCAmelCase : str=0 ) -> Optional[Any]:
"""simple docstring"""
__lowercase = name
__lowercase = level
__lowercase = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__lowercase = copy.deepcopy(_UpperCAmelCase )
__lowercase = copy.deepcopy(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = Config(_UpperCAmelCase , name=_UpperCAmelCase , level=level + 1 )
__lowercase = v
setattr(self , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = d
def __repr__( self : str ) -> int:
"""simple docstring"""
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = val
__lowercase = val
__lowercase = key.split('.' )
__lowercase = len(_UpperCAmelCase ) - 1
__lowercase = self._pointer
if len(_UpperCAmelCase ) > 1:
for i, l in enumerate(_UpperCAmelCase ):
if hasattr(self , _UpperCAmelCase ) and isinstance(getattr(self , _UpperCAmelCase ) , _UpperCAmelCase ):
setattr(getattr(self , _UpperCAmelCase ) , '.'.join(levels[i:] ) , _UpperCAmelCase )
if l == last_level:
__lowercase = val
else:
__lowercase = pointer[l]
def a__ ( self : int ) -> Dict:
"""simple docstring"""
return self._pointer
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
with open(f"""{file_name}""" , 'w' ) as stream:
dump(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ) -> Dict:
"""simple docstring"""
with open(f"""{file_name}""" , 'w' ) as stream:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
@staticmethod
def a__ ( _UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
with open(_UpperCAmelCase ) as stream:
__lowercase = load(_UpperCAmelCase , Loader=_UpperCAmelCase )
return data
def __str__( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = ''' '''
if self._name != "root":
__lowercase = f"""{t * (self._level-1)}{self._name}:\n"""
else:
__lowercase = ''''''
__lowercase = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(_UpperCAmelCase ).__name__})\n"""
__lowercase = level
return r[:-1]
@classmethod
def a__ ( cls : Tuple , _UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
return cls(_UpperCAmelCase )
@classmethod
def a__ ( cls : int , _UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Any ) -> int:
"""simple docstring"""
__lowercase = kwargs.pop('cache_dir' , _UpperCAmelCase )
__lowercase = kwargs.pop('force_download' , _UpperCAmelCase )
__lowercase = kwargs.pop('resume_download' , _UpperCAmelCase )
__lowercase = kwargs.pop('proxies' , _UpperCAmelCase )
__lowercase = kwargs.pop('local_files_only' , _UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
__lowercase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
elif os.path.isfile(_UpperCAmelCase ) or is_remote_url(_UpperCAmelCase ):
__lowercase = pretrained_model_name_or_path
else:
__lowercase = hf_bucket_url(_UpperCAmelCase , filename=_UpperCAmelCase , use_cdn=_UpperCAmelCase )
try:
# Load from URL or cache if already cached
__lowercase = cached_path(
_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , local_files_only=_UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__lowercase = Config.load_yaml(_UpperCAmelCase )
except EnvironmentError:
__lowercase = '''Can\'t load config for'''
raise EnvironmentError(_UpperCAmelCase )
if resolved_config_file == config_file:
print('loading configuration file from path' )
else:
print('loading configuration file cache' )
return Config.load_yaml(_UpperCAmelCase ), kwargs
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
__lowercase = torch.load('dump.pt' , map_location=in_tensor.device )
__lowercase = in_tensor.numpy()
__lowercase = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(a_ , a_ , rtol=0.01 , atol=0.1 ), (
F"""{sum([1 for x in np.isclose(a_ , a_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging face functions below
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> List[Any]:
__lowercase = urlparse(a_ )
return parsed.scheme in ("http", "https")
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str=True ) -> str:
__lowercase = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__lowercase = '''/''' not in model_id
if legacy_format:
return F"""{endpoint}/{model_id}-{filename}"""
else:
return F"""{endpoint}/{model_id}/{filename}"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : Optional[Any]=0 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ) -> int:
__lowercase = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(a_ , a_ ):
ua += "; " + "; ".join('{}/{}'.format(a_ , a_ ) for k, v in user_agent.items() )
elif isinstance(a_ , a_ ):
ua += "; " + user_agent
__lowercase = {'''user-agent''': ua}
if resume_size > 0:
__lowercase = '''bytes=%d-''' % (resume_size,)
__lowercase = requests.get(a_ , stream=a_ , proxies=a_ , headers=a_ )
if response.status_code == 416: # Range not satisfiable
return
__lowercase = response.headers.get('Content-Length' )
__lowercase = resume_size + int(a_ ) if content_length is not None else None
__lowercase = tqdm(
unit='B' , unit_scale=a_ , total=a_ , initial=a_ , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(a_ ) )
temp_file.write(a_ )
progress.close()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Union[str, Any]=10 , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Dict=False , ) -> Any:
if cache_dir is None:
__lowercase = TRANSFORMERS_CACHE
if isinstance(a_ , a_ ):
__lowercase = str(a_ )
os.makedirs(a_ , exist_ok=a_ )
__lowercase = None
if not local_files_only:
try:
__lowercase = requests.head(a_ , allow_redirects=a_ , proxies=a_ , timeout=a_ )
if response.status_code == 200:
__lowercase = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__lowercase = url_to_filename(a_ , a_ )
# get cache path to put the file
__lowercase = os.path.join(a_ , a_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(a_ ):
return cache_path
else:
__lowercase = [
file
for file in fnmatch.filter(os.listdir(a_ ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(a_ ) > 0:
return os.path.join(a_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(a_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__lowercase = cache_path + '''.lock'''
with FileLock(a_ ):
# If the download just completed while the lock was activated.
if os.path.exists(a_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__lowercase = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(a_ , 'a+b' ) as f:
yield f
__lowercase = _resumable_file_manager
if os.path.exists(a_ ):
__lowercase = os.stat(a_ ).st_size
else:
__lowercase = 0
else:
__lowercase = partial(tempfile.NamedTemporaryFile , dir=a_ , delete=a_ )
__lowercase = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
            print(
                '%s not found in cache or force_download set to True, downloading to %s' % (a_, temp_file.name) )
http_get(
a_ , a_ , proxies=a_ , resume_size=a_ , user_agent=a_ , )
os.replace(temp_file.name , a_ )
__lowercase = {'''url''': url, '''etag''': etag}
__lowercase = cache_path + '''.json'''
with open(a_ , 'w' ) as meta_file:
json.dump(a_ , a_ )
return cache_path
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str]=None ) -> Any:
__lowercase = url.encode('utf-8' )
    __lowercase = sha256(a_ )
__lowercase = url_hash.hexdigest()
if etag:
__lowercase = etag.encode('utf-8' )
        __lowercase = sha256(a_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : List[Any]=False , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : List[Any]=False , ) -> Any:
if cache_dir is None:
__lowercase = TRANSFORMERS_CACHE
if isinstance(a_ , a_ ):
__lowercase = str(a_ )
if isinstance(a_ , a_ ):
__lowercase = str(a_ )
if is_remote_url(a_ ):
# URL, so get it from the cache (downloading if necessary)
__lowercase = get_from_cache(
a_ , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , user_agent=a_ , local_files_only=a_ , )
elif os.path.exists(a_ ):
# File, and it exists.
__lowercase = url_or_filename
elif urlparse(a_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(a_ ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(a_ ) )
if extract_compressed_file:
if not is_zipfile(a_ ) and not tarfile.is_tarfile(a_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__lowercase = os.path.split(a_ )
__lowercase = output_file.replace('.' , '-' ) + '''-extracted'''
__lowercase = os.path.join(a_ , a_ )
if os.path.isdir(a_ ) and os.listdir(a_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__lowercase = output_path + '''.lock'''
with FileLock(a_ ):
shutil.rmtree(a_ , ignore_errors=a_ )
os.makedirs(a_ )
if is_zipfile(a_ ):
with ZipFile(a_ , 'r' ) as zip_file:
zip_file.extractall(a_ )
zip_file.close()
elif tarfile.is_tarfile(a_ ):
__lowercase = tarfile.open(a_ )
tar_file.extractall(a_ )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(a_ ) )
return output_path_extracted
return output_path
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int]="," ) -> Optional[Any]:
assert isinstance(a_ , a_ )
if os.path.isfile(a_ ):
with open(a_ ) as f:
__lowercase = eval(f.read() )
else:
__lowercase = requests.get(a_ )
try:
            __lowercase = req.json()
except Exception:
__lowercase = req.content.decode()
assert data is not None, "could not connect"
try:
__lowercase = eval(a_ )
except Exception:
__lowercase = data.split('\n' )
req.close()
return data
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] ) -> str:
__lowercase = requests.get(a_ )
__lowercase = np.array(Image.open(BytesIO(response.content ) ) )
return img
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> Dict:
__lowercase = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(a_ )
with open(a_ , 'rb' ) as stream:
__lowercase = pkl.load(a_ )
__lowercase = weights.pop('model' )
__lowercase = {}
for k, v in model.items():
__lowercase = torch.from_numpy(a_ )
if "running_var" in k:
__lowercase = torch.tensor([0] )
__lowercase = k.replace('running_var' , 'num_batches_tracked' )
__lowercase = zero
return new
def __SCREAMING_SNAKE_CASE ( ) -> int:
print(F"""{os.path.abspath(os.path.join(a_ , os.pardir ) )}/demo.ipynb""" )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]="RGB" ) -> str:
assert isinstance(a_ , a_ )
if os.path.isfile(a_ ):
        __lowercase = cv2.imread(a_ )
else:
__lowercase = get_image_from_url(a_ )
assert img is not None, F"""could not connect to: {im}"""
    __lowercase = cv2.cvtColor(a_ , cv2.COLOR_BGR2RGB )
if input_format == "RGB":
__lowercase = img[:, :, ::-1]
return img
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str=1 ) -> List[str]:
return (images[i : i + batch] for i in range(0 , len(a_ ) , a_ ))
| 713 |
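# --- Editor's note: the sha256-based cache key computed by url_to_filename
# above, shown standalone. The URL and ETag below are made up.
from hashlib import sha256

url = "https://example.com/weights/model.bin"
etag = '"0123abcd"'
filename = sha256(url.encode("utf-8")).hexdigest()
if etag:
    filename += "." + sha256(etag.encode("utf-8")).hexdigest()
if url.endswith(".h5"):
    filename += ".h5"
print(filename)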
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[str] , _UpperCAmelCase : str = "▁" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[str, AddedToken] = "<unk>" , _UpperCAmelCase : Union[str, AddedToken] = "</s>" , _UpperCAmelCase : Union[str, AddedToken] = "<pad>" , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['token']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=_UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__lowercase = TemplateProcessing(
single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
__lowercase = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> str:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [files]
self._tokenizer.train(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : int , _UpperCAmelCase : Union[Iterator[str], Iterator[Iterator[str]]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> Dict:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
self._tokenizer.train_from_iterator(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['unk']['id']
__lowercase = Tokenizer.from_str(json.dumps(_UpperCAmelCase ) )
| 688 | 0 |
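# --- Editor's note: the underlying `tokenizers` calls the wrapper above makes
# when training a Unigram model from an iterator. The corpus and vocab size are
# illustrative only; requires the `tokenizers` package.
from tokenizers import Tokenizer, trainers
from tokenizers.models import Unigram

tok = Tokenizer(Unigram())
trainer = trainers.UnigramTrainer(
    vocab_size=100, special_tokens=["<pad>", "</s>", "<unk>"], show_progress=False
)
tok.train_from_iterator(["hello world", "goodbye cruel world"], trainer=trainer)
print(tok.encode("hello world").tokens)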
from __future__ import annotations
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ = TypeVar("""T""")
class A__ ( Generic[T] ):
def __init__( self : Any , _UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = data
__lowercase = self
__lowercase = 0
class A__ ( Generic[T] ):
def __init__( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = {}
def a__ ( self : Dict , _UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = DisjointSetTreeNode(_A )
def a__ ( self : int , _UpperCAmelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.map[data]
if elem_ref != elem_ref.parent:
__lowercase = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def a__ ( self : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ) -> Optional[int]:
"""simple docstring"""
        if nodea.rank > nodeb.rank:
            __lowercase = nodea  # nodeb.parent = nodea
        else:
            __lowercase = nodeb  # nodea.parent = nodeb
            if nodea.rank == nodeb.rank:
                nodeb.rank += 1
def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
self.link(self.find_set(_A ) , self.find_set(_A ) )
class A__ ( Generic[T] ):
def __init__( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = {}
def a__ ( self : str , _UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
if node not in self.connections:
__lowercase = {}
def a__ ( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
self.add_node(_A )
self.add_node(_A )
__lowercase = weight
__lowercase = weight
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase = []
__lowercase = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )  # sort edges by weight
# creating the disjoint set
__lowercase = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(_A )
# MST generation
__lowercase = 0
__lowercase = 0
__lowercase = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
__lowercase , __lowercase , __lowercase = edges[index]
index += 1
__lowercase = disjoint_set.find_set(_A )
__lowercase = disjoint_set.find_set(_A )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(_A , _A , _A )
disjoint_set.union(_A , _A )
return graph
| 714 |
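# --- Editor's note: a compact, self-contained Kruskal sketch equivalent to the
# class-based version above: sort edges by weight, use union-find to skip any
# edge that would close a cycle. Graph data is an arbitrary example.
def kruskal(n: int, edges: list) -> list:  # edges: (u, v, w) with 0-based nodes
    parent = list(range(n))

    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        ru, rv = find(u), find(v)
        if ru != rv:            # u and v are in different components
            parent[ru] = rv     # union
            mst.append((u, v, w))
    return mst

print(kruskal(3, [(0, 1, 1), (1, 2, 2), (0, 2, 10)]))  # [(0, 1, 1), (1, 2, 2)]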
import string
from math import logaa
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> int:
__lowercase = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
__lowercase = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> tuple[int, int]:
__lowercase = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
__lowercase = corpus_without_punctuation.split('\n' )
__lowercase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE ))
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str]=False ) -> float:
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> float:
return round(tf * idf , 3 )
| 688 | 0 |
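# --- Editor's note: a self-contained tf-idf check mirroring the helpers above
# (which all share one obfuscated name): tf is the raw count in one document,
# idf = log10(n / df) over the corpus.
from math import log10

docs = ["the cat sat", "the dog sat on the mat"]
term = "cat"
tf = docs[0].split().count(term)                # term frequency in doc 0 -> 1
df = sum(1 for d in docs if term in d.split())  # document frequency -> 1
idf = round(log10(len(docs) / df), 3)           # log10(2/1) = 0.301
print(round(tf * idf, 3))                       # 0.301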
from copy import deepcopy
class A__ :
def __init__( self : Optional[Any] , _UpperCAmelCase : list[int] | None = None , _UpperCAmelCase : int | None = None ) -> List[str]:
"""simple docstring"""
if arr is None and size is not None:
__lowercase = size
__lowercase = [0] * size
elif arr is not None:
self.init(__UpperCamelCase )
else:
raise ValueError('Either arr or size must be specified' )
def a__ ( self : Any , _UpperCAmelCase : list[int] ) -> Any:
"""simple docstring"""
__lowercase = len(__UpperCamelCase )
__lowercase = deepcopy(__UpperCamelCase )
for i in range(1 , self.size ):
__lowercase = self.next_(__UpperCamelCase )
if j < self.size:
self.tree[j] += self.tree[i]
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
__lowercase = self.next_(__UpperCamelCase )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def a__ ( _UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
return index + (index & (-index))
@staticmethod
def a__ ( _UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
return index - (index & (-index))
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
__lowercase = self.next_(__UpperCamelCase )
def a__ ( self : Any , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> Dict:
"""simple docstring"""
self.add(__UpperCamelCase , value - self.get(__UpperCamelCase ) )
def a__ ( self : int , _UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
if right == 0:
return 0
__lowercase = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
__lowercase = self.prev(__UpperCamelCase )
return result
def a__ ( self : Any , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> Dict:
"""simple docstring"""
return self.prefix(__UpperCamelCase ) - self.prefix(__UpperCamelCase )
def a__ ( self : int , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
return self.query(__UpperCamelCase , index + 1 )
def a__ ( self : List[str] , _UpperCAmelCase : int ) -> List[str]:
"""simple docstring"""
value -= self.tree[0]
if value < 0:
return -1
__lowercase = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
__lowercase = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
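# --- Editor's note: a compact 1-indexed Fenwick-tree sketch of the same idea as
# the class above (which is 0-indexed with a special tree[0] slot): tree[i]
# covers the (i & -i) elements ending at i, so updates and prefix sums are O(log n).
class Fenwick:
    def __init__(self, n: int) -> None:
        self.n, self.tree = n, [0] * (n + 1)

    def add(self, i: int, v: int) -> None:  # point update, 1-based index
        while i <= self.n:
            self.tree[i] += v
            i += i & -i

    def prefix(self, i: int) -> int:  # sum of elements 1..i
        s = 0
        while i > 0:
            s += self.tree[i]
            i -= i & -i
        return s

f = Fenwick(5)
for i, v in enumerate([1, 2, 3, 4, 5], start=1):
    f.add(i, v)
print(f.prefix(3))  # 1 + 2 + 3 = 6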
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "retribert"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=3_05_22 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : List[Any]=8 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=5_12 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=1e-1_2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Optional[int]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = share_encoders
__lowercase = projection_dim
| 688 | 0 |
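# --- Editor's note: instantiating the config above. RetriBert has since moved
# under transformers' deprecated models, so the import path can vary by version.
from transformers import RetriBertConfig

config = RetriBertConfig(projection_dim=256)
print(config.model_type, config.share_encoders, config.projection_dim)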
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any=False ) -> int:
try:
__lowercase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowercase = default
else:
# KEY is set, convert it to True or False.
try:
__lowercase = strtobool(_UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
SCREAMING_SNAKE_CASE__ = parse_flag_from_env("""RUN_SLOW""", default=False)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]:
return unittest.skip('Test was skipped' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> int:
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]:
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> Dict:
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> int:
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any ) -> int:
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]:
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Union[str, Any]:
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : str=None ) -> str:
if test_case is None:
return partial(_UpperCamelCase , version=_UpperCamelCase )
return unittest.skipUnless(is_torch_version('>=' , _UpperCamelCase ) , F"""test requires torch version >= {version}""" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> List[str]:
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> Tuple:
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCamelCase )
class A__ ( unittest.TestCase ):
lowerCAmelCase__ : List[str] = True
@classmethod
def a__ ( cls : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
@classmethod
def a__ ( cls : Any ) -> Dict:
"""simple docstring"""
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_UpperCAmelCase )
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class A__ ( unittest.TestCase ):
def a__ ( self : List[Any] , _UpperCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase = mocks if isinstance(_UpperCAmelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
__lowercase = AcceleratorState()
__lowercase = tensor[None].clone().to(state.device )
__lowercase = gather(_UpperCamelCase ).cpu()
__lowercase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _UpperCamelCase ):
return False
return True
class A__ :
def __init__( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = returncode
__lowercase = stdout
__lowercase = stderr
async def _read_stream(stream : Dict , callback : Tuple ) -> Dict:
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd : Optional[int] , env : List[str]=None , stdin : Union[str, Any]=None , timeout : List[Any]=None , quiet : int=False , echo : Tuple=False ) -> _RunOutput:
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line : Optional[Any] , sink : int , pipe : str , label : str="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label='stdout:' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label='stderr:' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def __SCREAMING_SNAKE_CASE ( cmd : int , env : Tuple=None , stdin : List[Any]=None , timeout : int=180 , quiet : Optional[Any]=False , echo : Tuple=True ) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""" )
    return result
class SubprocessCallException( Exception ):
    pass
def __SCREAMING_SNAKE_CASE ( command : List[str] , return_stdout : List[str]=False ) -> List[str]:
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , 'decode' ):
                output = output.decode('utf-8' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"""Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_falcon"""] = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
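    # Added note: this keeps `import <package>` cheap, since _LazyModule only imports the
    # submodules listed in _import_structure when one of their attributes is first accessed.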
| 688 | 0 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> bool:
    bitmap = 0
    for ch in SCREAMING_SNAKE_CASE:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
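    # Illustrative checks (added): each character sets one bit keyed by its code point,
    # so a repeated character trips the `bitmap >> ch_unicode & 1` test.
    assert __SCREAMING_SNAKE_CASE('abc' )
    assert not __SCREAMING_SNAKE_CASE('aba' )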
| 717 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__( self : Optional[Any] , image_processor : Any=None , tokenizer : Union[str, Any]=None , **kwargs : List[Any] ) -> Optional[int]:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self : int , images : List[str] , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs : Dict , ) -> BatchEncoding:
        """simple docstring"""
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['overflow_to_sample_mapping'] )
        encoded_inputs['image'] = images
        return encoded_inputs
    def get_overflowing_images( self : Tuple , images : List[Any] , overflow_to_sample_mapping : str ) -> List[str]:
        """simple docstring"""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
        return images_with_overflow
    def batch_decode( self : Dict , *args : Optional[Any] , **kwargs : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : Optional[Any] , *args : Optional[Any] , **kwargs : Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self : Optional[Any] ) -> Tuple:
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class( self : str ) -> Dict:
        """simple docstring"""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self : List[Any] ) -> Union[str, Any]:
        """simple docstring"""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
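# Minimal usage sketch (added; the checkpoint name is illustrative of the LayoutXLM family):
# processor = LayoutXLMProcessor.from_pretrained('microsoft/layoutxlm-base' )
# encoding = processor(images=image , return_tensors='pt' )  # OCR words/boxes come from the image processor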
| 688 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 718 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class A__ :
lowerCAmelCase__ : Optional[int] = "dummy_data"
lowerCAmelCase__ : str = "datasets"
lowerCAmelCase__ : Dict = False
    def __init__( self : Dict , dataset_name : str , config : str , version : Union[Version, str] , cache_dir : Optional[str] = None , use_local_dummy_data : bool = False , load_existing_dummy_data : bool = True , download_callbacks : Optional[List[Callable]] = None , ) -> Union[str, Any]:
        """simple docstring"""
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
    @property
    def dummy_data_folder( self : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('dummy' , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join('dummy' , self.version_name )
    @property
    def dummy_zip_file( self : int ) -> Tuple:
        """simple docstring"""
        return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
    def download_dummy_data( self : str ) -> Union[str, Any]:
        """simple docstring"""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True )
        return os.path.join(local_path , self.dummy_file_name )
    @property
    def local_path_to_dummy_data( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
    @property
    def github_path_to_dummy_data( self : List[Any] ) -> Union[str, Any]:
        """simple docstring"""
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
        return self._bucket_url
    @property
    def manual_dir( self : List[Any] ) -> List[str]:
        """simple docstring"""
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
    def download_and_extract( self : Union[str, Any] , data_url : List[str] , *args : Tuple ) -> Dict:
        """simple docstring"""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url , dict ):
            return self.create_dummy_data_dict(dummy_file , data_url )
        elif isinstance(data_url , (list, tuple) ):
            return self.create_dummy_data_list(dummy_file , data_url )
        else:
            return self.create_dummy_data_single(dummy_file , data_url )
    def download( self : Optional[int] , data_url : Tuple , *args : Optional[int] ) -> List[str]:
        """simple docstring"""
        return self.download_and_extract(data_url )
    def download_custom( self : List[Any] , data_url : List[Any] , custom_download : Optional[Any] ) -> Union[str, Any]:
        """simple docstring"""
        return self.download_and_extract(data_url )
    def extract( self : Dict , path : Tuple , *args : str , **kwargs : str ) -> Optional[int]:
        """simple docstring"""
        return path
    def get_recorded_sizes_checksums( self : str ) -> Union[str, Any]:
        """simple docstring"""
        return {}
    def create_dummy_data_dict( self : int , path_to_dummy_data : Optional[int] , data_url : Optional[Any] ) -> Tuple:
        """simple docstring"""
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls , list ):
                    for single_url in single_urls:
                        download_callback(single_url )
                else:
                    single_url = single_urls
                    download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls , list ):
                value = [os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(single_url ).name ) )
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i , str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list( self : Optional[int] , path_to_dummy_data : Optional[int] , data_url : Union[str, Any] ) -> Optional[int]:
        """simple docstring"""
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , url ) ) for url in data_url )
        is_pubmed_records = all(
            url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
            dummy_data_list.append(value )
        return dummy_data_list
    def create_dummy_data_single( self : Tuple , path_to_dummy_data : str , data_url : Optional[Any] ) -> Union[str, Any]:
        """simple docstring"""
        for download_callback in self.download_callbacks:
            download_callback(data_url )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
        if os.path.exists(value ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files( self : List[str] ) -> Any:
        """simple docstring"""
        pass
    def manage_extracted_files( self : int ) -> str:
        """simple docstring"""
        pass
    def iter_archive( self : Optional[int] , path : List[Any] ) -> Any:
        """simple docstring"""
        def _iter_archive_members(path : Optional[Any] ):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file ).parent
            relative_path = path.relative_to(dummy_parent_path )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(member )
        path = Path(path )
        file_paths = _iter_archive_members(path ) if self.use_local_dummy_data else path.rglob('*' )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
                yield file_path.relative_to(path ).as_posix(), file_path.open('rb' )
    def iter_files( self : Optional[Any] , paths : List[str] ) -> str:
        """simple docstring"""
        if not isinstance(paths , list ):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path ):
                if os.path.basename(path ).startswith(('.', '__') ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path ):
                    if os.path.basename(dirpath ).startswith(('.', '__') ):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames ):
                        if filename.startswith(('.', '__') ):
                            continue
                        yield os.path.join(dirpath , filename )
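# Sketch of the substitution this manager performs (added; the url is illustrative):
# download_and_extract({'train': 'https://host/path/train.csv'}) returns
# {'train': '<dummy_file>/train.csv'}, i.e. every real url is swapped for its
# url-quoted basename inside the local dummy_data directory.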
| 688 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : Tuple , config_file : Optional[int] , pytorch_dump_path : Any , base_model : str ) -> List[Any]:
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
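# Example invocation (added; all file paths are placeholders):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path model.ckpt --config_file config.json \
#       --pytorch_dump_path pytorch_model.bin --base_model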
| 719 |
import math
import sys
import cv2
import numpy as np
def vec_gaussian( img : np.ndarray , variance : float ) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance )
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def get_slice( img : np.ndarray , x : int , y : int , kernel_size : int ) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel( kernel_size : int , spatial_variance : float ) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size ):
        for j in range(0 , kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr , spatial_variance )
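# Added note: bilateral_filter below computes a normalized weighted mean per pixel with
#   w = exp(-d^2 / (2 * sigma_s^2)) * exp(-(I(p) - I(q))^2 / (2 * sigma_r^2)),
# the product of the spatial kernel from get_gauss_kernel and the intensity kernel
# obtained by applying vec_gaussian to the centre-pixel-subtracted slice.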
def bilateral_filter( img : np.ndarray , spatial_variance : float , intensity_variance : float , kernel_size : int , ) -> np.ndarray:
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
    size_x , size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img , i , j , kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i , intensity_variance )
            weights = np.multiply(gauss_ker , img_ig )
            vals = np.multiply(img_s , weights )
            val = np.sum(vals ) / np.sum(weights )
            imga[i, j] = val
    return imga
def parse_args( args : list ) -> tuple:
    filename = args[1] if args[1:] else '../image_data/lena.jpg'
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = parse_args(sys.argv)
SCREAMING_SNAKE_CASE__ = cva.imread(filename, 0)
cva.imshow("""input image""", img)
SCREAMING_SNAKE_CASE__ = img / 255
SCREAMING_SNAKE_CASE__ = out.astype("""float32""")
SCREAMING_SNAKE_CASE__ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
SCREAMING_SNAKE_CASE__ = out * 255
SCREAMING_SNAKE_CASE__ = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
| 688 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A__ ( ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand( parser : ArgumentParser ):
        """simple docstring"""
        raise NotImplementedError()
    @abstractmethod
    def run( self : Any ):
        """simple docstring"""
        raise NotImplementedError() | 720 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester ( unittest.TestCase ):
    def __init__( self : int , parent : str , batch_size : List[str]=7 , num_channels : List[str]=3 , image_size : Any=18 , min_resolution : Dict=30 , max_resolution : Tuple=4_00 , do_resize : List[str]=True , size : Optional[int]=None , apply_ocr : Any=True , ) -> Dict:
        """simple docstring"""
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self : Optional[int] ) -> Any:
        """simple docstring"""
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
    @property
    def image_processor_dict( self : int ) -> Tuple:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self : List[Any] ) -> int:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'apply_ocr' ) )
    def test_image_processor_from_dict_with_kwargs( self : Optional[int] ) -> Any:
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
    def test_batch_feature( self : int ) -> Tuple:
        """simple docstring"""
        pass
    def test_call_pil( self : int ) -> Tuple:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='pt' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_call_numpy( self : Tuple ) -> Tuple:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_call_pytorch( self : Any ) -> Optional[int]:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_layoutlmv3_integration_test( self : Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
        image = Image.open(ds[0]['file'] ).convert('RGB' )
        encoding = image_processing(image , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
__lowercase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
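        expected_words = __lowercase  # capture the OCR words fixture before the next assignment rebinds the name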
__lowercase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
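        expected_boxes = __lowercase  # capture the boxes fixture under a readable name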
# fmt: on
        self.assertListEqual(encoding.words , expected_words )
        self.assertListEqual(encoding.boxes , expected_boxes )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
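# Added note: with apply_ocr=True (the default) the image processor runs Tesseract and
# returns `words`/`boxes` alongside pixel_values; with apply_ocr=False only the resized
# pixel_values of shape (1, 3, 224, 224) are produced, as the last assertions check.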
| 688 | 0 |
def prime_sieve_eratosthenes( num : int ) -> list[int]:
    if num <= 0:
        raise ValueError('Input must be a positive integer' )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
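# Illustration (added): composites are crossed off starting at p * p, giving the usual
# O(n log log n) sieve; e.g. prime_sieve_eratosthenes(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].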
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
| 721 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A__ ( PretrainedConfig ):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__( self : str , vocab_size : int=25_01_12 , d_model : Optional[int]=5_12 , d_kv : List[str]=64 , d_ff : Union[str, Any]=10_24 , num_layers : str=8 , num_decoder_layers : Tuple=None , num_heads : List[str]=6 , relative_attention_num_buckets : str=32 , relative_attention_max_distance : Optional[int]=1_28 , dropout_rate : List[str]=0.1 , layer_norm_epsilon : str=1e-6 , initializer_factor : Dict=1.0 , feed_forward_proj : str="gated-gelu" , is_encoder_decoder : str=True , use_cache : Union[str, Any]=True , tokenizer_class : Tuple="T5Tokenizer" , tie_word_embeddings : Union[str, Any]=True , pad_token_id : List[str]=0 , eos_token_id : int=1 , decoder_start_token_id : List[str]=0 , **kwargs : Union[str, Any] , ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(
            is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
    @property
    def hidden_size( self : Tuple ) -> Any:
        """simple docstring"""
        return self.d_model
    @property
    def num_attention_heads( self : List[str] ) -> List[Any]:
        """simple docstring"""
        return self.num_heads
    @property
    def num_hidden_layers( self : Union[str, Any] ) -> str:
        """simple docstring"""
        return self.num_layers
class A__ ( OnnxSeqaSeqConfigWithPast ):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs( self : str ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        return common_inputs
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset( self : List[str] ) -> int:
        """simple docstring"""
        return 13
    @property
    def atol_for_validation( self : Dict ) -> float:
        """simple docstring"""
        return 5e-4
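# Added note: with the default feed_forward_proj='gated-gelu' the constructor sets
# is_gated_act=True and rewrites dense_act_fn to 'gelu_new'; any 'gated-<ACT_FN>' or
# plain '<ACT_FN>' string is accepted and everything else raises the ValueError above.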
| 688 | 0 |
def __SCREAMING_SNAKE_CASE ( number : int ) -> int:
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = F"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( PretrainedConfig ):
    model_type = "layoutlmv3"
    def __init__( self : Optional[Any] , vocab_size : int=5_02_65 , hidden_size : Union[str, Any]=7_68 , num_hidden_layers : str=12 , num_attention_heads : Union[str, Any]=12 , intermediate_size : List[str]=30_72 , hidden_act : Dict="gelu" , hidden_dropout_prob : List[Any]=0.1 , attention_probs_dropout_prob : Optional[int]=0.1 , max_position_embeddings : Optional[int]=5_12 , type_vocab_size : Optional[int]=2 , initializer_range : int=0.02 , layer_norm_eps : Optional[Any]=1e-5 , pad_token_id : List[Any]=1 , bos_token_id : Optional[Any]=0 , eos_token_id : str=2 , max_ad_position_embeddings : Union[str, Any]=10_24 , coordinate_size : Optional[Any]=1_28 , shape_size : Tuple=1_28 , has_relative_attention_bias : Dict=True , rel_pos_bins : Dict=32 , max_rel_pos : Dict=1_28 , rel_ad_pos_bins : int=64 , max_rel_ad_pos : List[str]=2_56 , has_spatial_attention_bias : int=True , text_embed : Dict=True , visual_embed : int=True , input_size : Union[str, Any]=2_24 , num_channels : List[Any]=3 , patch_size : List[Any]=16 , classifier_dropout : Union[str, Any]=None , **kwargs : Any , ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_ad_position_embeddings = max_ad_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_ad_pos_bins = rel_ad_pos_bins
        self.max_rel_ad_pos = max_rel_ad_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class A__ ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.12" )
@property
    def inputs( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
    def atol_for_validation( self : Any ) -> float:
"""simple docstring"""
return 1e-5
@property
    def default_onnx_opset( self : Dict ) -> int:
"""simple docstring"""
return 12
def a__ ( self : Tuple , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
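# Added note: generate_dummy_inputs disables OCR and pads batch/sequence to the OnnxConfig
# fixed sizes (2 samples and 8 tokens by default) so the ONNX trace sees static shapes;
# the single bbox [48, 84, 73, 128] per image is an arbitrary but well-formed box.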
| 688 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class A__ ( unittest.TestCase ):
    def setUp( self : List[Any] ) -> Tuple:
        """simple docstring"""
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut( self : int ) -> Union[str, Any]:
        """simple docstring"""
        config = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
        self.assertIsInstance(config , CLIPImageProcessor )
    def test_image_processor_from_local_directory_from_key( self : Optional[int] ) -> Tuple:
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname ) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(config_tmpfile , 'w' ) )
            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config , CLIPImageProcessor )
    def test_image_processor_from_local_directory_from_feature_extractor_key( self : Tuple ) -> List[str]:
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname ) / 'config.json'
            json.dump(
                {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(config_tmpfile , 'w' ) )
            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config , CLIPImageProcessor )
    def test_image_processor_from_local_directory_from_config( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname ) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(config_tmpfile , 'w' ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname ).to_dict()
            config_dict.pop('image_processor_type' )
            config = CLIPImageProcessor(**config_dict )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )
            config = AutoImageProcessor.from_pretrained(tmpdirname )
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )
            self.assertIsInstance(config , CLIPImageProcessor )
    def test_image_processor_from_local_file( self : Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )
            config = AutoImageProcessor.from_pretrained(processor_tmpfile )
            self.assertIsInstance(config , CLIPImageProcessor )
    def test_repo_not_found( self : Union[str, Any] ) -> List[str]:
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , 'clip-base is not a local folder and is not a valid model identifier' ):
            config = AutoImageProcessor.from_pretrained('clip-base' )
    def test_revision_not_found( self : List[Any] ) -> Tuple:
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            config = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa' )
    def test_image_processor_not_found( self : int ) -> str:
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
            config = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
    def test_from_pretrained_dynamic_image_processor( self : List[str] ) -> Optional[Any]:
        """simple docstring"""
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=False )
        image_processor = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=True )
        self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir )
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
    def test_new_image_processor_registration( self : Tuple ) -> str:
        """simple docstring"""
        try:
            AutoConfig.register('custom' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , CustomImageProcessor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoImageProcessor.register(CLIPConfig , CLIPImageProcessor )
            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
                config_tmpfile = Path(tmpdirname ) / 'config.json'
                json.dump(
                    {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )
                json.dump({'model_type': 'clip'} , open(config_tmpfile , 'w' ) )
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname )
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir )
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir )
            self.assertIsInstance(new_image_processor , CustomImageProcessor )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict( self : Optional[int] ) -> int:
        """simple docstring"""
        class NewImageProcessor( CLIPImageProcessor ):
            is_local = True
        try:
            AutoConfig.register('custom' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , NewImageProcessor )
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=False )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=True )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(not hasattr(image_processor , 'is_local' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
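# Added note: the try/finally blocks above unregister the 'custom' config and processor so the
# global CONFIG_MAPPING / IMAGE_PROCESSOR_MAPPING registries stay clean for other tests.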
| 701 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray( rgb : np.ndarray ) -> np.ndarray:
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b
def gray_to_binary( gray : np.ndarray ) -> np.ndarray:
    return (gray > 127) & (gray <= 255)
def dilation( image : np.ndarray , kernel : np.ndarray ) -> np.ndarray:
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE__ = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
SCREAMING_SNAKE_CASE__ = np.array(Image.open(lena_path))
# kernel to be applied
SCREAMING_SNAKE_CASE__ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
SCREAMING_SNAKE_CASE__ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
SCREAMING_SNAKE_CASE__ = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
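# Note: the `summation > 0` threshold is what makes this a dilation rather than a
# plain convolution -- an output pixel fires whenever the structuring element
# overlaps at least one foreground pixel. A tiny self-contained check (no image
# file required; the plus-shaped element mirrors the one used above):
#
#   tiny = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
#   dilation(tiny, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
#   # -> the single foreground pixel grows into a plus shape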
| 688 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 702 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = ["pixel_values"]
def __init__( self : Tuple , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 2_55 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : bool = True , **_UpperCAmelCase : str , ) -> None:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = size if size is not None else {'height': 3_84, 'width': 3_84}
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase = do_convert_rgb
def a__ ( self : int , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : int , ) -> np.ndarray:
"""simple docstring"""
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
__lowercase = (size['height'], size['width'])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Any , ) -> str:
"""simple docstring"""
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
    def a__ ( self : int , _UpperCAmelCase : ImageInput , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Dict[str, int]] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[float] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : int , ) -> BatchFeature:
"""simple docstring"""
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = resample if resample is not None else self.resample
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__lowercase = BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCAmelCase )
return encoded_outputs
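# Hedged usage sketch: the method above applies
# convert-to-RGB -> resize -> rescale -> normalize -> channel reordering and
# wraps the result in a BatchFeature. Names below assume the final method is
# exposed as the processor's `preprocess`/`__call__`:
#
#   from PIL import Image
#   processor = A__()                            # defaults: 384x384, OpenAI CLIP mean/std
#   batch = processor(Image.open("cat.png"), return_tensors="np")
#   batch["pixel_values"].shape                  # (1, 3, 384, 384)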
| 688 | 0 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""jukebox""": 512,
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES
lowerCAmelCase__ : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=["v3", "v2", "v2"] , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Union[str, Any]="<|endoftext|>" , **_UpperCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
super().__init__(
unk_token=_UpperCAmelCase , n_genres=_UpperCAmelCase , version=_UpperCAmelCase , max_n_lyric_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = version
__lowercase = max_n_lyric_tokens
__lowercase = n_genres
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
__lowercase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2 the vocabulary had n_vocab=80; in v3 the '+' character was dropped, so n_vocab=79.
if len(self.lyrics_encoder ) == 79:
__lowercase = oov.replace(R'\-\'' , R'\-+\'' )
__lowercase = regex.compile(_UpperCAmelCase )
__lowercase = {v: k for k, v in self.artists_encoder.items()}
__lowercase = {v: k for k, v in self.genres_encoder.items()}
__lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        return {**self.artists_encoder , **self.genres_encoder , **self.lyrics_encoder}
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = [self.artists_encoder.get(_UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(_UpperCAmelCase ) ):
__lowercase = [self.genres_encoder.get(_UpperCAmelCase , 0 ) for genre in list_genres[genres]]
__lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__lowercase = [[self.lyrics_encoder.get(_UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def a__ ( self : str , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
return list(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = self.prepare_for_tokenization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = self._tokenize(_UpperCAmelCase )
return artist, genre, lyrics
def a__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__lowercase = artists[idx].lower()
__lowercase = [genres[idx].lower()]
else:
__lowercase = self._normalize(artists[idx] ) + '.v2'
__lowercase = [
self._normalize(_UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
__lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
__lowercase = {vocab[index]: index + 1 for index in range(len(_UpperCAmelCase ) )}
__lowercase = 0
__lowercase = len(_UpperCAmelCase ) + 1
__lowercase = self.vocab
__lowercase = {v: k for k, v in self.vocab.items()}
__lowercase = ''
else:
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
__lowercase = self._run_strip_accents(_UpperCAmelCase )
__lowercase = lyrics.replace('\\' , '\n' )
__lowercase = self.out_of_vocab.sub('' , _UpperCAmelCase ), [], []
return artists, genres, lyrics
def a__ ( self : Tuple , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = unicodedata.normalize('NFD' , _UpperCAmelCase )
__lowercase = []
for char in text:
__lowercase = unicodedata.category(_UpperCAmelCase )
if cat == "Mn":
continue
output.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = (
[chr(_UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
__lowercase = frozenset(_UpperCAmelCase )
__lowercase = re.compile(R'_+' )
__lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] )
__lowercase = pattern.sub('_' , _UpperCAmelCase ).strip('_' )
return text
def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return " ".join(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = False ) -> int:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = TensorType(_UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
__lowercase = tf.constant
__lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
__lowercase = torch.tensor
__lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
__lowercase = jnp.array
__lowercase = _is_jax
else:
__lowercase = np.asarray
__lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__lowercase = [inputs]
if not is_tensor(_UpperCAmelCase ):
__lowercase = as_tensor(_UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int="" , _UpperCAmelCase : Tuple="pt" ) -> BatchEncoding:
"""simple docstring"""
__lowercase = [0, 0, 0]
__lowercase = [artist] * len(self.version )
__lowercase = [genres] * len(self.version )
__lowercase , __lowercase , __lowercase = self.tokenize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase , __lowercase = self._convert_token_to_id(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = [-INFINITY] * len(full_tokens[-1] )
__lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def a__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.artists_decoder.get(_UpperCAmelCase )
__lowercase = [self.genres_decoder.get(_UpperCAmelCase ) for genre in genres_index]
__lowercase = [self.lyrics_decoder.get(_UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
| 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
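# Note: after this line the module object in `sys.modules[__name__]` is the
# `_LazyModule` proxy, so the heavy backends (torch, tf, flax, tokenizers) are
# imported only when one of the names in `_import_structure` is first accessed;
# the TYPE_CHECKING branch above exists purely so static analyzers see real imports.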
| 688 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any ) -> Dict:
__lowercase , __lowercase = image.size
__lowercase , __lowercase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__lowercase = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] )
__lowercase = np.array(SCREAMING_SNAKE_CASE ).astype(np.floataa ) / 255.0
__lowercase = image[None].transpose(0 , 3 , 1 , 2 )
__lowercase = torch.from_numpy(SCREAMING_SNAKE_CASE )
return 2.0 * image - 1.0
class A__ ( lowerCAmelCase__ ):
def __init__( self : Optional[Any] , _UpperCAmelCase : VQModel , _UpperCAmelCase : UNetaDModel , _UpperCAmelCase : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
@torch.no_grad()
def __call__( self : str , _UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : Optional[int] = 1_00 , _UpperCAmelCase : Optional[float] = 0.0 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
if isinstance(_UpperCAmelCase , PIL.Image.Image ):
__lowercase = 1
elif isinstance(_UpperCAmelCase , torch.Tensor ):
__lowercase = image.shape[0]
else:
raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_UpperCAmelCase )}""" )
if isinstance(_UpperCAmelCase , PIL.Image.Image ):
__lowercase = preprocess(_UpperCAmelCase )
__lowercase , __lowercase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__lowercase = (batch_size, self.unet.config.in_channels // 2, height, width)
__lowercase = next(self.unet.parameters() ).dtype
__lowercase = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
__lowercase = image.to(device=self.device , dtype=_UpperCAmelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_UpperCAmelCase , device=self.device )
__lowercase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
for t in self.progress_bar(_UpperCAmelCase ):
# concat latents and low resolution image in the channel dimension.
__lowercase = torch.cat([latents, image] , dim=1 )
__lowercase = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
# decode the image latents with the VQVAE
__lowercase = self.vqvae.decode(_UpperCAmelCase ).sample
__lowercase = torch.clamp(_UpperCAmelCase , -1.0 , 1.0 )
__lowercase = image / 2 + 0.5
__lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCAmelCase )
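# Hedged usage sketch for the pipeline above (a latent-diffusion super-resolution
# pipeline). The public class name and checkpoint id below are assumptions drawn
# from the diffusers ecosystem, not from this file:
#
#   from PIL import Image
#   from diffusers import LDMSuperResolutionPipeline
#
#   pipe = LDMSuperResolutionPipeline.from_pretrained(
#       "CompVis/ldm-super-resolution-4x-openimages")
#   low_res = Image.open("input.png").convert("RGB").resize((128, 128))
#   upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
#   upscaled.save("upscaled.png")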
| 704 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ) -> Any:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
__lowercase = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE )
# set absolute/relative position embeddings parameter
__lowercase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WTQ":
# run_task_main.py hparams
__lowercase = 4
__lowercase = True
# hparam_utils.py hparams
__lowercase = 0.664_694
__lowercase = 0.207_951
__lowercase = 0.121_194
__lowercase = True
__lowercase = True
__lowercase = False
__lowercase = 0.0_352_513
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__lowercase = 4
__lowercase = False
# hparam_utils.py hparams
__lowercase = 36.4_519
__lowercase = 0.903_421
__lowercase = 222.088
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 0.763_141
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "TABFACT":
__lowercase = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE )
elif task == "MLM":
__lowercase = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE )
elif task == "INTERMEDIATE_PRETRAINING":
__lowercase = TapasModel(config=SCREAMING_SNAKE_CASE )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
__lowercase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
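# Example invocation (script name and paths are placeholders, not taken from this file):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output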
| 688 | 0 |
import math
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> bool:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple=1 , **SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
__lowercase = factor * value
__lowercase = value
while not is_prime(SCREAMING_SNAKE_CASE ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **SCREAMING_SNAKE_CASE )
return value
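# Hedged usage sketch, assuming the two helpers above keep their original names
# `is_prime` and `next_prime`:
#
#   >>> is_prime(13)
#   True
#   >>> next_prime(14)              # counts upward from 14
#   17
#   >>> next_prime(14, desc=True)   # `desc=True` searches downward instead
#   13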
| 705 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> int:
    return int((input_1, input_2).count(1 ) != 0 )
def __SCREAMING_SNAKE_CASE ( ) -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 688 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
snake_case_ = """__DUMMY_TRANSFORMERS_USER__"""
snake_case_ = """Dummy User"""
snake_case_ = """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"""
snake_case_ = """https://hub-ci.huggingface.co"""
snake_case_ = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
snake_case_ = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
snake_case_ = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] ) -> int:
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , SCREAMING_SNAKE_CASE )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , SCREAMING_SNAKE_CASE )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , SCREAMING_SNAKE_CASE )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> Dict:
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , SCREAMING_SNAKE_CASE )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ) -> List[str]:
HfFolder.save_token(SCREAMING_SNAKE_CASE )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
return HfApi(endpoint=SCREAMING_SNAKE_CASE )
@pytest.fixture(scope='session' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : HfApi ) -> Optional[int]:
__lowercase = HfFolder.get_token()
HfFolder.save_token(SCREAMING_SNAKE_CASE )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(SCREAMING_SNAKE_CASE )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
def _cleanup_repo(SCREAMING_SNAKE_CASE : str ):
hf_api.delete_repo(SCREAMING_SNAKE_CASE , token=SCREAMING_SNAKE_CASE , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]:
@contextmanager
def _temporary_repo(SCREAMING_SNAKE_CASE : Tuple ):
try:
yield repo_id
finally:
cleanup_repo(SCREAMING_SNAKE_CASE )
return _temporary_repo
@pytest.fixture(scope='session' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : HfApi , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] ) -> Any:
__lowercase = F"""repo_txt_data-{int(time.time() * 10E3 )}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(SCREAMING_SNAKE_CASE , token=SCREAMING_SNAKE_CASE , repo_type='dataset' , private=SCREAMING_SNAKE_CASE )
hf_api.upload_file(
token=SCREAMING_SNAKE_CASE , path_or_fileobj=str(SCREAMING_SNAKE_CASE ) , path_in_repo='data/text_data.txt' , repo_id=SCREAMING_SNAKE_CASE , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(SCREAMING_SNAKE_CASE , token=SCREAMING_SNAKE_CASE , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : HfApi , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
__lowercase = F"""repo_zipped_txt_data-{int(time.time() * 10E3 )}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(SCREAMING_SNAKE_CASE , token=SCREAMING_SNAKE_CASE , repo_type='dataset' , private=SCREAMING_SNAKE_CASE )
hf_api.upload_file(
token=SCREAMING_SNAKE_CASE , path_or_fileobj=str(SCREAMING_SNAKE_CASE ) , path_in_repo='data.zip' , repo_id=SCREAMING_SNAKE_CASE , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(SCREAMING_SNAKE_CASE , token=SCREAMING_SNAKE_CASE , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : HfApi , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]:
__lowercase = F"""repo_zipped_img_data-{int(time.time() * 10E3 )}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(SCREAMING_SNAKE_CASE , token=SCREAMING_SNAKE_CASE , repo_type='dataset' , private=SCREAMING_SNAKE_CASE )
hf_api.upload_file(
token=SCREAMING_SNAKE_CASE , path_or_fileobj=str(SCREAMING_SNAKE_CASE ) , path_in_repo='data.zip' , repo_id=SCREAMING_SNAKE_CASE , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(SCREAMING_SNAKE_CASE , token=SCREAMING_SNAKE_CASE , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
return hf_private_dataset_repo_zipped_img_data_
| 706 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """Hello, World!"""
SCREAMING_SNAKE_CASE__ = """en_XX"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ) -> Optional[int]:
__lowercase = Path('data_bin' )
__lowercase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(SCREAMING_SNAKE_CASE ) , bpe='sentencepiece' , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE )
__lowercase = xmod.model.encoder.sentence_encoder
__lowercase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , SCREAMING_SNAKE_CASE )
__lowercase = XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase = xmod_sent_encoder.embed_tokens.weight
__lowercase = xmod_sent_encoder.embed_positions.weight
__lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowercase = xmod_sent_encoder.layernorm_embedding.weight
__lowercase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase = model.roberta.encoder.layer[i]
__lowercase = xmod_sent_encoder.layers[i]
# self attention
__lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowercase = xmod_layer.self_attn.q_proj.weight
__lowercase = xmod_layer.self_attn.q_proj.bias
__lowercase = xmod_layer.self_attn.k_proj.weight
__lowercase = xmod_layer.self_attn.k_proj.bias
__lowercase = xmod_layer.self_attn.v_proj.weight
__lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowercase = xmod_layer.self_attn.out_proj.weight
__lowercase = xmod_layer.self_attn.out_proj.bias
__lowercase = xmod_layer.self_attn_layer_norm.weight
__lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowercase = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError('Dimensions of intermediate weights do not match.' )
        __lowercase = xmod_layer.fc1.weight
        __lowercase = xmod_layer.fc1.bias
# output
__lowercase = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError('Dimensions of feed-forward weights do not match.' )
        __lowercase = xmod_layer.fc2.weight
        __lowercase = xmod_layer.fc2.bias
__lowercase = xmod_layer.final_layer_norm.weight
__lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowercase = xmod_layer.adapter_layer_norm.weight
__lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowercase = bert_output.adapter_modules[lang_code]
__lowercase = xmod_layer.adapter_modules[lang_code]
            __lowercase = from_adapter.fc1.weight
            __lowercase = from_adapter.fc1.bias
            __lowercase = from_adapter.fc2.weight
            __lowercase = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowercase = xmod_sent_encoder.layer_norm.weight
__lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].dense.weight
__lowercase = xmod.model.classification_heads['mnli'].dense.bias
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight
__lowercase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowercase = xmod.model.encoder.lm_head.dense.weight
__lowercase = xmod.model.encoder.lm_head.dense.bias
__lowercase = xmod.model.encoder.lm_head.layer_norm.weight
__lowercase = xmod.model.encoder.lm_head.layer_norm.bias
__lowercase = xmod.model.encoder.lm_head.weight
__lowercase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowercase = xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
__lowercase = model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
else:
__lowercase = xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
__lowercase = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 688 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Dict = "ClapFeatureExtractor"
lowerCAmelCase__ : List[str] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : Tuple , _UpperCAmelCase : int=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : Any ) -> int:
"""simple docstring"""
__lowercase = kwargs.pop('sampling_rate' , _UpperCAmelCase )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
__lowercase = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if audios is not None:
__lowercase = self.feature_extractor(
_UpperCAmelCase , sampling_rate=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and audios is not None:
__lowercase = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def a__ ( self : Dict , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : int , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : int ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer.model_input_names
__lowercase = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
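# Hedged usage sketch: the processor simply routes `text` to the tokenizer and
# `audios` to the feature extractor, then merges the outputs. The checkpoint id
# below is an assumption, not taken from this file:
#
#   import numpy as np
#   from transformers import ClapProcessor
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   audio = np.zeros(48_000, dtype=np.float32)   # one second of silence at 48 kHz
#   inputs = processor(text=["a dog barking"], audios=audio,
#                      sampling_rate=48_000, return_tensors="pt")
#   # inputs carries both `input_ids` and `input_features`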
| 707 |
from __future__ import annotations
import math
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int ) -> float:
__lowercase = u
for i in range(1 , SCREAMING_SNAKE_CASE ):
__lowercase = temp * (u - i)
return temp
def __SCREAMING_SNAKE_CASE ( ) -> None:
__lowercase = int(input('enter the numbers of values: ' ) )
__lowercase = []
for _ in range(SCREAMING_SNAKE_CASE ):
y.append([] )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
y[i].append(SCREAMING_SNAKE_CASE )
__lowercase = 0
print('enter the values of parameters in a list: ' )
__lowercase = list(map(SCREAMING_SNAKE_CASE , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(SCREAMING_SNAKE_CASE ):
__lowercase = float(input() )
__lowercase = int(input('enter the value to interpolate: ' ) )
__lowercase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , SCREAMING_SNAKE_CASE ):
for j in range(n - i ):
__lowercase = y[j + 1][i - 1] - y[j][i - 1]
__lowercase = y[0][0]
for i in range(1 , SCREAMING_SNAKE_CASE ):
summ += (ucal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) * y[0][i]) / math.factorial(SCREAMING_SNAKE_CASE )
print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
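# The program above builds a forward-difference table in `y` and evaluates the
# Newton-Gregory forward interpolation formula
#     P(x) = y0 + u*dy0 + u(u-1)/2! * d^2y0 + ...   with u = (x - x0) / h,
# where `ucal` supplies the falling-factorial factors u(u-1)...(u-i+1).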
| 688 | 0 |
'''simple docstring'''
class A__ :
def __init__( self : Any ) -> Any:
"""simple docstring"""
__lowercase = ''
__lowercase = ''
__lowercase = []
def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
__lowercase = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
__lowercase = self.__min_dist_top_down_dp(_UpperCAmelCase , n - 1 )
__lowercase = self.__min_dist_top_down_dp(m - 1 , _UpperCAmelCase )
__lowercase = self.__min_dist_top_down_dp(m - 1 , n - 1 )
__lowercase = 1 + min(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return self.dp[m][n]
def a__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : str ) -> int:
"""simple docstring"""
__lowercase = worda
__lowercase = worda
__lowercase = [[-1 for _ in range(len(_UpperCAmelCase ) )] for _ in range(len(_UpperCAmelCase ) )]
return self.__min_dist_top_down_dp(len(_UpperCAmelCase ) - 1 , len(_UpperCAmelCase ) - 1 )
def a__ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str ) -> int:
"""simple docstring"""
__lowercase = worda
__lowercase = worda
__lowercase = len(_UpperCAmelCase )
__lowercase = len(_UpperCAmelCase )
__lowercase = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
__lowercase = j
elif j == 0: # second string is empty
__lowercase = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
__lowercase = self.dp[i - 1][j - 1]
else:
__lowercase = self.dp[i][j - 1]
__lowercase = self.dp[i - 1][j]
__lowercase = self.dp[i - 1][j - 1]
__lowercase = 1 + min(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return self.dp[m][n]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = EditDistance()
print("""****************** Testing Edit Distance DP Algorithm ******************""")
print()
SCREAMING_SNAKE_CASE__ = input("""Enter the first string: """).strip()
SCREAMING_SNAKE_CASE__ = input("""Enter the second string: """).strip()
print()
print(F'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(F'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 708 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = F"""Input value of [number={number}] must be an integer"""
raise TypeError(SCREAMING_SNAKE_CASE )
if number < 1:
__lowercase = F"""Input value of [number={number}] must be > 0"""
raise ValueError(SCREAMING_SNAKE_CASE )
__lowercase = 1
for i in range(1 , SCREAMING_SNAKE_CASE ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
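# The loop above applies the Catalan recurrence C(n) = C(n-1) * (4n - 2) / (n + 1)
# with C(1) = 1, so the function returns the n-th Catalan number, 1-indexed.
# A quick check, assuming the helper is importable as `catalan_number`:
#
#   >>> [catalan_number(n) for n in range(1, 6)]
#   [1, 1, 2, 5, 14]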
| 688 | 0 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
__lowercase = AutoTokenizer.from_pretrained('google/mt5-small' )
__lowercase = tokenizer('Hello there' , return_tensors='np' ).input_ids
__lowercase = tokenizer('Hi I am' , return_tensors='np' ).input_ids
__lowercase = shift_tokens_right(_UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
__lowercase = model(_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ).logits
__lowercase = optax.softmax_cross_entropy(_UpperCAmelCase , onehot(_UpperCAmelCase , logits.shape[-1] ) ).mean()
__lowercase = -(labels.shape[-1] * loss.item())
__lowercase = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 709 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
__lowercase = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
__lowercase = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE , 'func' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
| 688 | 0 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : List[Any] = BloomTokenizerFast
lowerCAmelCase__ : Tuple = BloomTokenizerFast
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Optional[int] = False
lowerCAmelCase__ : List[Any] = "tokenizer_file"
lowerCAmelCase__ : int = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
__lowercase = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self : Tuple , **_UpperCAmelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.get_rust_tokenizer()
__lowercase = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
__lowercase = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
__lowercase = tokenizer.batch_encode_plus(_UpperCAmelCase )['input_ids']
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : str=6 ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__lowercase = 'This is a simple input'
__lowercase = ['This is a simple input 1', 'This is a simple input 2']
__lowercase = ('This is a simple input', 'This is a pair')
__lowercase = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.batch_encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.encode(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.batch_encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
__lowercase = None # Hotfixing padding = None
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
def a__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('xnli' , 'all_languages' , split='test' , streaming=True )
        sample_data = next(iter(ds ) )['premise'] # pick one sample from the streamed dataset
        input_text = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(input_text , predicted_text )
def a__ ( self : int ) -> str:
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 710 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = ProphetNetTokenizer
lowerCAmelCase__ : str = False
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : str , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = 'UNwant\u00E9d,running'
__lowercase = 'unwanted, running'
return input_text, output_text
def a__ ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowercase = {}
for i, token in enumerate(_UpperCAmelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' )
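        # WordPiece greedily matches the longest vocabulary prefix; a word containing any
        # unmatchable piece is mapped to [UNK] as a whole (see the 'unwantedX' case below).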
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
        batch = tokenizer(src_text , padding=True , return_tensors='pt' )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a__ ( self : int ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == text + [1_02]
        assert encoded_pair == text + [1_02] + text_a + [1_02]
| 688 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 711 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""jukebox""": 512,
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES
lowerCAmelCase__ : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=["v3", "v2", "v2"] , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Union[str, Any]="<|endoftext|>" , **_UpperCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
super().__init__(
unk_token=_UpperCAmelCase , n_genres=_UpperCAmelCase , version=_UpperCAmelCase , max_n_lyric_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = version
__lowercase = max_n_lyric_tokens
__lowercase = n_genres
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
__lowercase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2 the character vocabulary had n_vocab=80; in v3 the '+' character was dropped, leaving n_vocab=79.
if len(self.lyrics_encoder ) == 79:
__lowercase = oov.replace(R'\-\'' , R'\-+\'' )
__lowercase = regex.compile(_UpperCAmelCase )
__lowercase = {v: k for k, v in self.artists_encoder.items()}
__lowercase = {v: k for k, v in self.genres_encoder.items()}
__lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = [self.artists_encoder.get(_UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(_UpperCAmelCase ) ):
__lowercase = [self.genres_encoder.get(_UpperCAmelCase , 0 ) for genre in list_genres[genres]]
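            # pad the genre ids with -1 up to n_genres so every example carries a fixed-length genre field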
__lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__lowercase = [[self.lyrics_encoder.get(_UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def a__ ( self : str , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
return list(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = self.prepare_for_tokenization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = self._tokenize(_UpperCAmelCase )
return artist, genre, lyrics
def a__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__lowercase = artists[idx].lower()
__lowercase = [genres[idx].lower()]
else:
__lowercase = self._normalize(artists[idx] ) + '.v2'
__lowercase = [
self._normalize(_UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
__lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
__lowercase = {vocab[index]: index + 1 for index in range(len(_UpperCAmelCase ) )}
__lowercase = 0
__lowercase = len(_UpperCAmelCase ) + 1
__lowercase = self.vocab
__lowercase = {v: k for k, v in self.vocab.items()}
__lowercase = ''
else:
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
__lowercase = self._run_strip_accents(_UpperCAmelCase )
__lowercase = lyrics.replace('\\' , '\n' )
__lowercase = self.out_of_vocab.sub('' , _UpperCAmelCase ), [], []
return artists, genres, lyrics
    def _run_strip_accents( self : Tuple , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
        """simple docstring"""
        text = unicodedata.normalize('NFD' , _UpperCAmelCase )
        output = []
        for char in text:
            cat = unicodedata.category(char )
            if cat == "Mn":
                continue
            output.append(char )
        return "".join(output )
    def _normalize( self : str , _UpperCAmelCase : str ) -> str:
        """simple docstring"""
        accepted = (
            [chr(i ) for i in range(ord('a' ) , ord('z' ) + 1 )]
            + [chr(i ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
            + [chr(i ) for i in range(ord('0' ) , ord('9' ) + 1 )]
            + ['.']
        )
        accepted = frozenset(accepted )
        pattern = re.compile(R'_+' )
        text = ''.join([c if c in accepted else '_' for c in _UpperCAmelCase.lower()] )
        text = pattern.sub('_' , text ).strip('_' )
        return text
def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return " ".join(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = False ) -> int:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = TensorType(_UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
__lowercase = tf.constant
__lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
__lowercase = torch.tensor
__lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
__lowercase = jnp.array
__lowercase = _is_jax
else:
__lowercase = np.asarray
__lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__lowercase = [inputs]
if not is_tensor(_UpperCAmelCase ):
__lowercase = as_tensor(_UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int="" , _UpperCAmelCase : Tuple="pt" ) -> BatchEncoding:
"""simple docstring"""
__lowercase = [0, 0, 0]
__lowercase = [artist] * len(self.version )
__lowercase = [genres] * len(self.version )
__lowercase , __lowercase , __lowercase = self.tokenize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase , __lowercase = self._convert_token_to_id(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = [-INFINITY] * len(full_tokens[-1] )
__lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def a__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.artists_decoder.get(_UpperCAmelCase )
__lowercase = [self.genres_decoder.get(_UpperCAmelCase ) for genre in genres_index]
__lowercase = [self.lyrics_decoder.get(_UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
| 688 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """spm_char.model"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""",
"""microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""",
"""microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""",
}
}
SCREAMING_SNAKE_CASE__ = {
"""microsoft/speecht5_asr""": 1024,
"""microsoft/speecht5_tts""": 1024,
"""microsoft/speecht5_vc""": 1024,
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Dict = VOCAB_FILES_NAMES
lowerCAmelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ : List[Any] = ["input_ids", "attention_mask"]
    def __init__( self : List[Any] , vocab_file : Optional[Any] , bos_token : str="<s>" , eos_token : Any="</s>" , unk_token : Optional[int]="<unk>" , pad_token : List[str]="<pad>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : List[str] , ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.sp_model.get_piece_size()
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> Any:
"""simple docstring"""
        state = self.__dict__.copy()
        state['sp_model'] = None
return state
def __setstate__( self : Optional[Any] , _UpperCAmelCase : Dict ) -> Any:
"""simple docstring"""
        self.__dict__ = _UpperCAmelCase
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
def a__ ( self : List[Any] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
        return self.sp_model.encode(_UpperCAmelCase , out_type=str )
def a__ ( self : str , _UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
return self.sp_model.piece_to_id(_UpperCAmelCase )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
        token = self.sp_model.IdToPiece(_UpperCAmelCase )
return token
def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> int:
"""simple docstring"""
        current_sub_tokens = []
        out_string = ''
        for token in _UpperCAmelCase:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
    def a__ ( self : Any , token_ids_a : Optional[int] , token_ids_b : List[Any]=None ) -> List[int]:
        """simple docstring"""
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]
    def a__ ( self : Optional[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        # in the returned mask, 1 marks a special token (the trailing eos) and 0 marks a sequence token
        suffix_ones = [1]
        if token_ids_b is None:
            return ([0] * len(token_ids_a )) + suffix_ones
        return ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
    def a__ ( self : List[str] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 712 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
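        # tile each input num_choices times along a new axis so the model scores every choice per example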
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[Any] = True
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = AlbertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : int ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
        model = AlbertModel.from_pretrained('albert-base-v2' )
        input_ids = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 688 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class A__ ( lowerCAmelCase__ ):
def __init__( self : Optional[Any] , _UpperCAmelCase : WhisperForConditionalGeneration , _UpperCAmelCase : WhisperProcessor , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : CLIPTextModel , _UpperCAmelCase : CLIPTokenizer , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _UpperCAmelCase : StableDiffusionSafetyChecker , _UpperCAmelCase : CLIPImageProcessor , ) -> Dict:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
speech_model=_UpperCAmelCase , speech_processor=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[Union[str, int]] = "auto" ) -> str:
"""simple docstring"""
if slice_size == "auto":
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.enable_attention_slicing(_UpperCAmelCase )
@torch.no_grad()
def __call__( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=1_60_00 , _UpperCAmelCase : int = 5_12 , _UpperCAmelCase : int = 5_12 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : float = 7.5 , _UpperCAmelCase : Optional[Union[str, List[str]]] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : Optional[torch.Generator] = None , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _UpperCAmelCase : int = 1 , **_UpperCAmelCase : List[str] , ) -> List[Any]:
"""simple docstring"""
__lowercase = self.speech_processor.feature_extractor(
_UpperCAmelCase , return_tensors='pt' , sampling_rate=_UpperCAmelCase ).input_features.to(self.device )
__lowercase = self.speech_model.generate(_UpperCAmelCase , max_length=48_00_00 )
__lowercase = self.speech_processor.tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , normalize=_UpperCAmelCase )[
0
]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = 1
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = len(_UpperCAmelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_UpperCAmelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_UpperCAmelCase )}.""" )
# get prompt text embeddings
__lowercase = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__lowercase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowercase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__lowercase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowercase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__lowercase , __lowercase , __lowercase = text_embeddings.shape
__lowercase = text_embeddings.repeat(1 , _UpperCAmelCase , 1 )
__lowercase = text_embeddings.view(bs_embed * num_images_per_prompt , _UpperCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = 42
if negative_prompt is None:
__lowercase = [''] * batch_size
elif type(_UpperCAmelCase ) is not type(_UpperCAmelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(_UpperCAmelCase )} !="""
f""" {type(_UpperCAmelCase )}.""" )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [negative_prompt]
elif batch_size != len(_UpperCAmelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(_UpperCAmelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
' the batch size of `prompt`.' )
else:
__lowercase = negative_prompt
__lowercase = text_input_ids.shape[-1]
__lowercase = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='pt' , )
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowercase = uncond_embeddings.shape[1]
__lowercase = uncond_embeddings.repeat(1 , _UpperCAmelCase , 1 )
__lowercase = uncond_embeddings.view(batch_size * num_images_per_prompt , _UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__lowercase = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(
self.device )
else:
__lowercase = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__lowercase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__lowercase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform guidance
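            # classifier-free guidance: the batch holds [unconditional, text-conditional] predictions;
            # guidance_scale > 1 pushes the denoising step toward the text-conditional direction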
if do_classifier_free_guidance:
__lowercase , __lowercase = noise_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
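        # undo the Stable Diffusion latent scaling factor (0.18215) before decoding with the VAE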
__lowercase = 1 / 0.18_215 * latents
__lowercase = self.vae.decode(_UpperCAmelCase ).sample
__lowercase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase )
| 713 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer( BaseTokenizer ):
    def __init__( self : List[str] , replacement : str = "▁" , add_prefix_space : bool = True , unk_token : Union[str, AddedToken] = "<unk>" , eos_token : Union[str, AddedToken] = "</s>" , pad_token : Union[str, AddedToken] = "<pad>" , ) -> Union[str, Any]:
        """simple docstring"""
        self.special_tokens = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']
        tokenizer = Tokenizer(Unigram() )
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}' ) , ' ' ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space )
        tokenizer.post_processor = TemplateProcessing(
            single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
        parameters = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }
        super().__init__(tokenizer , parameters )
    def train( self : str , files : Union[str, List[str]] , vocab_size : int = 80_00 , show_progress : bool = True , ) -> str:
        """simple docstring"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        if isinstance(files , str ):
            files = [files]
        self._tokenizer.train(files , trainer=trainer )
        self.add_unk_id()
    def train_from_iterator( self : int , iterator : Union[Iterator[str], Iterator[Iterator[str]]] , vocab_size : int = 80_00 , show_progress : bool = True , ) -> Dict:
        """simple docstring"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        self._tokenizer.train_from_iterator(iterator , trainer=trainer )
        self.add_unk_id()
    def add_unk_id( self : List[str] ) -> Tuple:
        """simple docstring"""
        tokenizer_json = json.loads(self._tokenizer.to_str() )
        tokenizer_json['model']['unk_id'] = self.special_tokens['unk']['id']
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
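# A minimal usage sketch of the class above (assumptions: the reconstructed names
# `SentencePieceUnigramTokenizer` / `train_from_iterator`; the toy two-sentence
# corpus is illustrative only, so the learned vocabulary is tiny):
if __name__ == "__main__":
    sp_tokenizer = SentencePieceUnigramTokenizer()
    sp_tokenizer.train_from_iterator(['hello world', 'hello tokenizers'] , vocab_size=30 , show_progress=False )
    print(sp_tokenizer.encode('hello world' ).tokens )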
| 688 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class A__ :
lowerCAmelCase__ : str = MBartConfig
lowerCAmelCase__ : Optional[Any] = {}
lowerCAmelCase__ : Optional[int] = "gelu"
def __init__( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=13 , _UpperCAmelCase : Dict=7 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : List[Any]=99 , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : int=2 , _UpperCAmelCase : str=4 , _UpperCAmelCase : List[str]=37 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : str=20 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : Union[str, Any]=0 , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = eos_token_id
__lowercase = pad_token_id
__lowercase = bos_token_id
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowercase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowercase = prepare_mbart_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def a__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFMBartModel(config=_UpperCAmelCase ).get_decoder()
__lowercase = inputs_dict['input_ids']
__lowercase = input_ids[:1, :]
__lowercase = inputs_dict['attention_mask'][:1, :]
__lowercase = inputs_dict['head_mask']
__lowercase = 1
# first forward pass
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
__lowercase , __lowercase = outputs.to_tuple()
__lowercase = past_key_values[1]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , ) -> int:
if attention_mask is None:
__lowercase = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
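        # always attend to the first decoder token, then mask out padding in the remaining positions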
__lowercase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__lowercase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
lowerCAmelCase__ : Dict = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase__ : int = (
{
"conversational": TFMBartForConditionalGeneration,
"feature-extraction": TFMBartModel,
"summarization": TFMBartForConditionalGeneration,
"text2text-generation": TFMBartForConditionalGeneration,
"translation": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : Optional[int] = False
def a__ ( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any ) -> List[Any]:
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = TFMBartModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class A__ ( unittest.TestCase ):
lowerCAmelCase__ : Optional[Any] = [
" UN Chief Says There Is No Military Solution in Syria",
]
lowerCAmelCase__ : List[Any] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
]
lowerCAmelCase__ : int = "facebook/mbart-large-en-ro"
@cached_property
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def a__ ( self : Any , **_UpperCAmelCase : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.translate_src_text(**_UpperCAmelCase )
self.assertListEqual(self.expected_text , _UpperCAmelCase )
def a__ ( self : Dict , **_UpperCAmelCase : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.tokenizer(self.src_text , **_UpperCAmelCase , return_tensors='tf' )
__lowercase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
__lowercase = self.tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
return generated_words
@slow
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 714 |
import string
from math import logaa
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> int:
__lowercase = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
__lowercase = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> tuple[int, int]:
__lowercase = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
__lowercase = corpus_without_punctuation.split('\n' )
__lowercase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE ))
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str]=False ) -> float:
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> float:
return round(tf * idf , 3 )
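# A minimal worked example (a sketch -- the four obfuscated helpers above are the
# term-frequency, document-frequency, idf and tf-idf steps, in that order, and
# shadow one another, so concrete numbers are shown instead of calls):
# with tf = 2 occurrences and df = 1 matching document out of n = 2 documents,
# idf = round(log10(2 / 1), 3) = 0.301 and tf-idf = round(2 * 0.301, 3) = 0.602.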
| 688 | 0 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 715 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "retribert"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=3_05_22 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : List[Any]=8 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=5_12 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=1e-1_2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Optional[int]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = share_encoders
__lowercase = projection_dim
| 688 | 0 |
import logging
from transformers.configuration_utils import PretrainedConfig
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : int = "masked_bert"
def __init__( self : Tuple , _UpperCAmelCase : Dict=3_05_22 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Optional[int]=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=5_12 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : str=1e-1_2 , _UpperCAmelCase : List[Any]=0 , _UpperCAmelCase : Tuple="topK" , _UpperCAmelCase : Dict="constant" , _UpperCAmelCase : Tuple=0.0 , **_UpperCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = pruning_method
__lowercase = mask_init
__lowercase = mask_scale
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
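# The module above follows the usual lazy-import pattern: _LazyModule resolves
# names only on first attribute access, so importing this package stays cheap
# and torch is only required when a model class is actually used.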
| 688 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/unispeech-sat-base-100h-libri-ft""": (
"""https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[Any] = "unispeech-sat"
def __init__( self : Union[str, Any] , _UpperCAmelCase : Tuple=32 , _UpperCAmelCase : Union[str, Any]=7_68 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : int=30_72 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : Optional[int]=1e-5 , _UpperCAmelCase : Dict="group" , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[str]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , _UpperCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase : Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : List[str]=1_28 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : str=False , _UpperCAmelCase : Any=True , _UpperCAmelCase : Dict=0.05 , _UpperCAmelCase : Tuple=10 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Union[str, Any]=10 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : Dict=3_20 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[int]=1_00 , _UpperCAmelCase : List[Any]=2_56 , _UpperCAmelCase : Optional[int]=2_56 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Tuple="mean" , _UpperCAmelCase : Dict=False , _UpperCAmelCase : str=False , _UpperCAmelCase : int=2_56 , _UpperCAmelCase : Any=(5_12, 5_12, 5_12, 5_12, 15_00) , _UpperCAmelCase : Optional[Any]=(5, 3, 3, 1, 1) , _UpperCAmelCase : str=(1, 2, 3, 1, 1) , _UpperCAmelCase : Tuple=5_12 , _UpperCAmelCase : Tuple=0 , _UpperCAmelCase : Any=1 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Optional[int]=5_04 , **_UpperCAmelCase : int , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
__lowercase = hidden_size
__lowercase = feat_extract_norm
__lowercase = feat_extract_activation
__lowercase = list(_UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
__lowercase = conv_bias
__lowercase = num_conv_pos_embeddings
__lowercase = num_conv_pos_embedding_groups
__lowercase = len(self.conv_dim )
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = num_attention_heads
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = feat_proj_dropout
__lowercase = final_dropout
__lowercase = layerdrop
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = vocab_size
__lowercase = num_clusters
__lowercase = do_stable_layer_norm
__lowercase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase = apply_spec_augment
__lowercase = mask_time_prob
__lowercase = mask_time_length
__lowercase = mask_time_min_masks
__lowercase = mask_feature_prob
__lowercase = mask_feature_length
__lowercase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__lowercase = num_codevectors_per_group
__lowercase = num_codevector_groups
__lowercase = contrastive_logits_temperature
__lowercase = feat_quantizer_dropout
__lowercase = num_negatives
__lowercase = codevector_dim
__lowercase = proj_codevector_dim
__lowercase = diversity_loss_weight
# ctc loss
__lowercase = ctc_loss_reduction
__lowercase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowercase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowercase = list(_UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
__lowercase = xvector_output_dim
@property
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
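        # Sketch of the property above: with the default strides (5, 2, 2, 2, 2, 2, 2)
        # the product is 5 * 2**6 = 320, i.e. one encoder frame per 320 raw audio
        # samples (20 ms at a 16 kHz sampling rate).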
| 717 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = ["image_processor", "tokenizer"]
lowerCAmelCase__ : Union[str, Any] = "LayoutLMv2ImageProcessor"
lowerCAmelCase__ : Union[str, Any] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[Any] , _UpperCAmelCase : Any=None , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__lowercase = kwargs.pop('feature_extractor' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , _UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : Dict , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
__lowercase = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowercase = features['words']
__lowercase = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__lowercase = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__lowercase = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] )
__lowercase = images
return encoded_inputs
def a__ ( self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f""" {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}""" )
return images_with_overflow
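    # get_overflowing_images repeats each document's image once per overflowing
    # window, so the returned list stays aligned with overflow_to_sample_mapping
    # after long documents are split into several encodings.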
def a__ ( self : Dict , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def a__ ( self : str ) -> Dict:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
| 688 | 0 |
'''simple docstring'''
import unittest
import numpy as np
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray | None = None , ) -> np.ndarray:
__lowercase = np.shape(SCREAMING_SNAKE_CASE )
__lowercase = np.shape(SCREAMING_SNAKE_CASE )
__lowercase = np.shape(SCREAMING_SNAKE_CASE )
if shape_a[0] != shape_b[0]:
__lowercase = (
'Expected the same number of rows for A and B. '
F"""Instead found A of size {shape_a} and B of size {shape_b}"""
)
raise ValueError(SCREAMING_SNAKE_CASE )
if shape_b[1] != shape_c[1]:
__lowercase = (
'Expected the same number of columns for B and C. '
F"""Instead found B of size {shape_b} and C of size {shape_c}"""
)
raise ValueError(SCREAMING_SNAKE_CASE )
__lowercase = pseudo_inv
if a_inv is None:
try:
__lowercase = np.linalg.inv(SCREAMING_SNAKE_CASE )
except np.linalg.LinAlgError:
raise ValueError(
'Input matrix A is not invertible. Cannot compute Schur complement.' )
return mat_c - mat_b.T @ a_inv @ mat_b
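# The value returned above is the Schur complement S = C - B^T A^(-1) B of the
# block matrix M = [[A, B], [B^T, C]]; it satisfies det(M) = det(A) * det(S),
# which is exactly the identity the first test below checks.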
class A__ ( unittest.TestCase ):
def a__ ( self : List[Any] ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1], [6, 3]] )
__lowercase = schur_complement(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = np.block([[a, b], [b.T, c]] )
__lowercase = np.linalg.det(_UpperCAmelCase )
__lowercase = np.linalg.det(_UpperCAmelCase )
__lowercase = np.linalg.det(_UpperCAmelCase )
self.assertAlmostEqual(_UpperCAmelCase , det_a * det_s )
def a__ ( self : Union[str, Any] ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(_UpperCAmelCase ):
schur_complement(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> None:
"""simple docstring"""
__lowercase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__lowercase = np.array([[0, 3], [3, 0], [2, 3]] )
__lowercase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(_UpperCAmelCase ):
schur_complement(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 718 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class A__ :
lowerCAmelCase__ : Optional[int] = "dummy_data"
lowerCAmelCase__ : str = "datasets"
lowerCAmelCase__ : Dict = False
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[Version, str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[List[Callable]] = None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 0
__lowercase = dataset_name
__lowercase = cache_dir
__lowercase = use_local_dummy_data
__lowercase = config
# download_callbacks take a single url as input
__lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowercase = str(_UpperCAmelCase )
# to be downloaded
__lowercase = None
__lowercase = None
@property
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self._dummy_file is None:
__lowercase = self.download_dummy_data()
return self._dummy_file
@property
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowercase = cached_path(
_UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCAmelCase , force_extract=_UpperCAmelCase )
return os.path.join(_UpperCAmelCase , self.dummy_file_name )
@property
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self._bucket_url is None:
__lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , *_UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return self.create_dummy_data_dict(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(_UpperCAmelCase , _UpperCAmelCase )
else:
return self.create_dummy_data_single(_UpperCAmelCase , _UpperCAmelCase )
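    # The dispatcher above (the dummy-data stand-in for download_and_extract)
    # maps a dict of urls, a list/tuple of urls, or a single url to the matching
    # paths inside the local dummy_data.zip.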
def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple , *_UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
return path
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return {}
def a__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for single_url in single_urls:
download_callback(_UpperCAmelCase )
else:
__lowercase = single_urls
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) ) for x in single_urls]
else:
__lowercase = single_urls
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) )
__lowercase = value
# make sure that values are unique
if all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
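    # The uniqueness pass above guards against dict urls that share a basename:
    # if any two dummy paths would collide, the dict key is appended to every
    # value so each entry maps to a distinct file.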
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _UpperCAmelCase ) ) for url in data_url )
__lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowercase = [data_url[0]] * len(_UpperCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(_UpperCAmelCase )
return dummy_data_list
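    # Sketch of the shard trick above: when every url matches the NNN-of-MMM
    # shard pattern (or the PubMed baseline prefix), the list collapses to
    # copies of its first url, so one dummy file can stand in for all shards.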
def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(_UpperCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def a__ ( self : int ) -> str:
"""simple docstring"""
pass
def a__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
def _iter_archive_members(_UpperCAmelCase : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__lowercase = Path(self.dummy_file ).parent
__lowercase = path.relative_to(_UpperCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_UpperCAmelCase )
__lowercase = Path(_UpperCAmelCase )
__lowercase = _iter_archive_members(_UpperCAmelCase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(_UpperCAmelCase ).as_posix(), file_path.open('rb' )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [paths]
for path in paths:
if os.path.isfile(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(_UpperCAmelCase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase )
| 688 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__ ( lowerCAmelCase__ ):
def __init__( self : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]=13 , _UpperCAmelCase : str=7 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Tuple=32 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Optional[Any]=64 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Dict=4 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : Optional[Any]=1 , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = q_groups
__lowercase = k_groups
__lowercase = v_groups
__lowercase = post_attention_groups
__lowercase = intermediate_groups
__lowercase = output_groups
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = SqueezeBertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = SqueezeBertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = SqueezeBertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : str ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = SqueezeBertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = SqueezeBertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = SqueezeBertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
((__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase)) = config_and_inputs
__lowercase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : Any = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCAmelCase__ : int = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : int = False
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[int] = False
def a__ ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase = SqueezeBertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , dim=37 )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_UpperCAmelCase )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_UpperCAmelCase )
@slow
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = SqueezeBertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
__lowercase = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
__lowercase = model(_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 3) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-4 ) )
| 719 |
import math
import sys
import cva
import numpy as np
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float ) -> np.ndarray:
    # Apply the Gaussian function to each element of the matrix; the argument is a variance, so its square root gives sigma.
__lowercase = math.sqrt(SCREAMING_SNAKE_CASE )
__lowercase = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> np.ndarray:
__lowercase = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float ) -> np.ndarray:
    # Create a Gaussian kernel of the given dimension.
__lowercase = np.zeros((kernel_size, kernel_size) )
for i in range(0 , SCREAMING_SNAKE_CASE ):
for j in range(0 , SCREAMING_SNAKE_CASE ):
__lowercase = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
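# Worked example: for kernel_size = 3 the distance matrix passed to
# vec_gaussian is [[sqrt(2), 1, sqrt(2)], [1, 0, 1], [sqrt(2), 1, sqrt(2)]] --
# each cell's Euclidean distance from the kernel center.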
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int , ) -> np.ndarray:
__lowercase = np.zeros(img.shape )
__lowercase = get_gauss_kernel(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase , __lowercase = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__lowercase = get_slice(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = img_s - img_s[kernel_size // 2, kernel_size // 2]
__lowercase = vec_gaussian(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.multiply(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.multiply(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.sum(SCREAMING_SNAKE_CASE ) / np.sum(SCREAMING_SNAKE_CASE )
__lowercase = val
return imga
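# Each output pixel above is a normalized weighted average of its neighborhood:
# the weights are the product of a spatial Gaussian (distance from the center
# pixel) and an intensity Gaussian (difference from the center value), which
# smooths the image while preserving edges -- the defining bilateral-filter idea.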
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list ) -> tuple:
__lowercase = args[1] if args[1:] else '../image_data/lena.jpg'
__lowercase = float(args[2] ) if args[2:] else 1.0
__lowercase = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__lowercase = int(args[4] )
__lowercase = kernel_size + abs(kernel_size % 2 - 1 )
else:
__lowercase = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = parse_args(sys.argv)
SCREAMING_SNAKE_CASE__ = cva.imread(filename, 0)
cva.imshow("""input image""", img)
SCREAMING_SNAKE_CASE__ = img / 255
SCREAMING_SNAKE_CASE__ = out.astype("""float32""")
SCREAMING_SNAKE_CASE__ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
SCREAMING_SNAKE_CASE__ = out * 255
SCREAMING_SNAKE_CASE__ = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
| 688 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = ProphetNetTokenizer
lowerCAmelCase__ : str = False
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : str , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = 'UNwant\u00E9d,running'
__lowercase = 'unwanted, running'
return input_text, output_text
def a__ ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowercase = {}
for i, token in enumerate(_UpperCAmelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowercase = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
__lowercase = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a__ ( self : int ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == text + [1_02]
        assert encoded_pair == text + [1_02] + text_a + [1_02]
| 720 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : Dict=30 , _UpperCAmelCase : Tuple=4_00 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Any=True , ) -> Dict:
"""simple docstring"""
__lowercase = size if size is not None else {'height': 18, 'width': 18}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = apply_ocr
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessingTester(self )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'apply_ocr' ) )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
pass
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _UpperCAmelCase )
self.assertIsInstance(encoding.boxes , _UpperCAmelCase )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowercase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__lowercase = Image.open(ds[0]['file'] ).convert('RGB' )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowercase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__lowercase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase )
self.assertListEqual(encoding.boxes , _UpperCAmelCase )
# with apply_OCR = False
__lowercase = LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
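# A sketch of the behaviour the integration test above pins down (assuming the
# same Tesseract 4.1.1 install; OCR output can vary across versions):
#
#   processor = LayoutLMvaImageProcessor()              # apply_ocr defaults to True
#   encoding = processor(image , return_tensors='pt' )  # pixel_values + words + boxes
#   processor = LayoutLMvaImageProcessor(apply_ocr=False )
#   encoding = processor(image , return_tensors='pt' )  # pixel_values only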
| 688 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "retribert"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=3_05_22 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : List[Any]=8 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=5_12 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=1e-1_2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Optional[int]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = share_encoders
__lowercase = projection_dim
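# Usage sketch (the checkpoint name is the one referenced in the archive map
# above; everything else is illustrative):
#
#   from transformers import AutoConfig
#   config = AutoConfig.from_pretrained('yjernite/retribert-base-uncased' )
#   assert config.model_type == 'retribert'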
| 721 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "umt5"
lowerCAmelCase__ : Tuple = ["past_key_values"]
def __init__( self : str , _UpperCAmelCase : int=25_01_12 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : List[str]=64 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : str=8 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : str=32 , _UpperCAmelCase : Optional[int]=1_28 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : str=1e-6 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : str="gated-gelu" , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Tuple="T5Tokenizer" , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[str]=0 , _UpperCAmelCase : int=1 , _UpperCAmelCase : List[str]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
is_encoder_decoder=_UpperCAmelCase , tokenizer_class=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_heads
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
__lowercase = self.feed_forward_proj.split('-' )
__lowercase = act_info[-1]
__lowercase = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__lowercase = 'gelu_new'
@property
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.d_model
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.num_heads
@property
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.num_layers
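# A note on the `feed_forward_proj` parsing in __init__ above: the string is
# split on '-', so 'gated-gelu' yields act_info == ['gated', 'gelu'] (a gated
# dense layer with a GELU activation, which the final branch remaps to the
# 'gelu_new' activation), while a plain 'relu' yields act_info == ['relu'] and
# no gating.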
class A__ ( lowerCAmelCase__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def a__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__lowercase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__lowercase = 'past_encoder_sequence + sequence'
__lowercase = {0: 'batch'}
__lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return 13
@property
def a__ ( self : Dict ) -> float:
"""simple docstring"""
return 5e-4
| 688 | 0 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """T5Config"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : jnp.array , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> jnp.ndarray:
    # Start from zeros and shift every token one position to the right.
    __lowercase = jnp.zeros_like(SCREAMING_SNAKE_CASE )
    __lowercase = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    # The first position is always the decoder start token id.
    __lowercase = shifted_input_ids.at[:, 0].set(SCREAMING_SNAKE_CASE )
    # Labels set to -100 (the ignore index) are replaced with the pad token id.
    __lowercase = jnp.where(shifted_input_ids == -100 , SCREAMING_SNAKE_CASE , shifted_input_ids )
return shifted_input_ids
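# Worked example for the shift above (a sketch; it assumes pad_token_id=0 and
# decoder_start_token_id=0, as in the stock T5/mT5 configs):
#
#   input_ids                      [[5, 6, -100, 7]]
#   after the right shift          [[0, 5, 6, -100]]
#   after replacing -100 with pad  [[0, 5, 6, 0]]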
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = "mt5"
lowerCAmelCase__ : List[str] = MTaConfig
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[Any] = "mt5"
lowerCAmelCase__ : Optional[Any] = MTaConfig
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Dict = "mt5"
lowerCAmelCase__ : List[Any] = MTaConfig
| 700 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "layoutlmv3"
def __init__( self : Optional[Any] , _UpperCAmelCase : int=5_02_65 , _UpperCAmelCase : Union[str, Any]=7_68 , _UpperCAmelCase : str=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[str]=30_72 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Tuple=1_28 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : int=64 , _UpperCAmelCase : List[str]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=2_24 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = max_ad_position_embeddings
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = has_relative_attention_bias
__lowercase = rel_pos_bins
__lowercase = max_rel_pos
__lowercase = has_spatial_attention_bias
__lowercase = rel_ad_pos_bins
__lowercase = max_rel_ad_pos
__lowercase = text_embed
__lowercase = visual_embed
__lowercase = input_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = classifier_dropout
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = version.parse("1.12" )
@property
def a__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def a__ ( self : Any ) -> float:
"""simple docstring"""
return 1e-5
@property
def a__ ( self : Dict ) -> int:
"""simple docstring"""
return 12
def a__ ( self : Tuple , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
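# Export sketch (hedged: this follows the `transformers.onnx` export API of the
# same era as this config; `processor`, `model` and `onnx_config` are
# hypothetical names, and the output path is illustrative):
#
#   from pathlib import Path
#   from transformers.onnx import export
#   onnx_inputs, onnx_outputs = export(
#       preprocessor=processor , model=model , config=onnx_config ,
#       opset=onnx_config.default_onnx_opset , output=Path('layoutlmv3.onnx' ) )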
| 688 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class A__ ( unittest.TestCase ):
def __init__( self : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=13 , _UpperCAmelCase : Union[str, Any]=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Any=99 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : int=5 , _UpperCAmelCase : Dict=4 , _UpperCAmelCase : Optional[Any]=37 , _UpperCAmelCase : List[Any]="gelu" , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=5_12 , _UpperCAmelCase : str=16 , _UpperCAmelCase : str=2 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[int]=4 , ) -> List[str]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_attention_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_choices
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_attention_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : Union[str, Any] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = FlaxAlbertModelTester(self )
@slow
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained('albert-base-v2' )
__lowercase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
@require_flax
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = FlaxAlbertModel.from_pretrained('albert-base-v2' )
__lowercase = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__lowercase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__lowercase = (1, 11, 7_68)
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
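# Shape sanity for the integration test above: 'albert-base-v2' uses
# hidden_size=768, so the (1, 11) input yields a last hidden state of
# (1, 11, 768), which is exactly the expected_shape assertion.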
| 701 |
from pathlib import Path
import numpy as np
from PIL import Image
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
__lowercase , __lowercase , __lowercase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
return (gray > 127) & (gray <= 255)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
__lowercase = np.zeros_like(SCREAMING_SNAKE_CASE )
__lowercase = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
__lowercase = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
__lowercase = int(summation > 0 )
return output
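# Toy check (not part of the original script): with the cross-shaped
# structuring element used below, dilating a single 'on' pixel turns on its
# 4-neighbourhood as well:
#
#   [[0, 0, 0],        [[0, 1, 0],
#    [0, 1, 0],   ->    [1, 1, 1],
#    [0, 0, 0]]         [0, 1, 0]]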
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE__ = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
SCREAMING_SNAKE_CASE__ = np.array(Image.open(lena_path))
# kernel to be applied
SCREAMING_SNAKE_CASE__ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
SCREAMING_SNAKE_CASE__ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
SCREAMING_SNAKE_CASE__ = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
| 688 | 0 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> str:
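    """
    Manacher's algorithm: return the longest palindromic substring of the
    input in linear time.

    >>> __SCREAMING_SNAKE_CASE('abbbaba')
    'abbba'
    >>> __SCREAMING_SNAKE_CASE('ababa')
    'ababa'
    """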
__lowercase = 0
# if input_string is "aba" than new_input_string become "a|b|a"
__lowercase = ''
__lowercase = ''
    # append each character + "|" to new_input_string for range(0, length-1)
for i in input_string[: len(SCREAMING_SNAKE_CASE ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
__lowercase , __lowercase = 0, 0
# length[i] shows the length of palindromic substring with center i
    __lowercase = [1 for i in range(len(new_input_string ) )]
    # for each character in new_input_string find the corresponding palindromic string
    __lowercase = 0
    for j in range(len(new_input_string ) ):
__lowercase = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
            and j + k < len(new_input_string )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update l and r to the bounds of this palindrome
        if j + k - 1 > r:
__lowercase = j - k + 1 # noqa: E741
__lowercase = j + k - 1
# update max_length and start position
if max_length < length[j]:
__lowercase = length[j]
__lowercase = j
# create that string
__lowercase = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = ["pixel_values"]
def __init__( self : Tuple , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 2_55 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : bool = True , **_UpperCAmelCase : str , ) -> None:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = size if size is not None else {'height': 3_84, 'width': 3_84}
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase = do_convert_rgb
def a__ ( self : int , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : int , ) -> np.ndarray:
"""simple docstring"""
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
__lowercase = (size['height'], size['width'])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Any , ) -> str:
"""simple docstring"""
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : ImageInput , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Dict[str, int]] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[float] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : int , ) -> PIL.Image.Image:
"""simple docstring"""
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = resample if resample is not None else self.resample
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__lowercase = BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCAmelCase )
return encoded_outputs
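# Usage sketch (`pil_image` is a hypothetical PIL image; the 384x384 shape
# follows the default `size` configured above):
#
#   processor = BlipImageProcessor()
#   batch = processor(images=pil_image , return_tensors='pt' )
#   batch['pixel_values'].shape  # torch.Size([1, 3, 384, 384])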
| 688 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase__ )
class A__ ( lowerCAmelCase__ ):
def __init__( self : str , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : str ) -> str:
"""simple docstring"""
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def a__ ( self : str , _UpperCAmelCase : List[str]=None ) -> Tuple:
"""simple docstring"""
__lowercase = {}
if top_k is not None:
__lowercase = top_k
return {}, {}, postprocess_params
def __call__( self : Tuple , _UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = load_image(_UpperCAmelCase )
__lowercase = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework )
return model_inputs
def a__ ( self : Tuple , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model(**_UpperCAmelCase )
return model_outputs
def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Tuple=5 ) -> Any:
"""simple docstring"""
if top_k > self.model.config.num_labels:
__lowercase = self.model.config.num_labels
if self.framework == "pt":
__lowercase = model_outputs.logits.softmax(-1 )[0]
__lowercase , __lowercase = probs.topk(_UpperCAmelCase )
elif self.framework == "tf":
__lowercase = stable_softmax(model_outputs.logits , axis=-1 )[0]
__lowercase = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase )
__lowercase , __lowercase = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__lowercase = scores.tolist()
__lowercase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCAmelCase , _UpperCAmelCase )]
| 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
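# With the _LazyModule indirection above, `import transformers.models.bert` is
# cheap: the torch/tf/flax submodules listed in _import_structure are only
# imported when one of their names is first accessed, e.g.:
#
#   from transformers.models.bert import BertConfig   # no torch import yet
#   from transformers.models.bert import BertModel    # now modeling_bert loads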
| 688 | 0 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class A__ ( lowerCAmelCase__ ):
def __init__( self : int , _UpperCAmelCase : Any=0.01 , _UpperCAmelCase : Optional[Any]=10_00 ) -> Dict:
"""simple docstring"""
__lowercase = p_stop
__lowercase = max_length
def __iter__( self : Dict ) -> int:
"""simple docstring"""
__lowercase = 0
__lowercase = False
while not stop and count < self.max_length:
yield count
count += 1
__lowercase = random.random() < self.p_stop
class A__ ( unittest.TestCase ):
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Union[str, Any]=True ) -> Optional[Any]:
"""simple docstring"""
__lowercase = [
BatchSamplerShard(_UpperCAmelCase , 2 , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
for i in range(2 )
]
__lowercase = [list(_UpperCAmelCase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(_UpperCAmelCase ) for shard in batch_sampler_shards] , [len(_UpperCAmelCase ) for e in expected] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowercase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but the
        # number of batches is a multiple of num_processes.
__lowercase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size and the
        # number of batches is not a multiple of num_processes.
__lowercase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowercase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [[], []]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__lowercase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowercase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowercase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [[], []]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowercase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but the
        # number of batches is a multiple of num_processes.
__lowercase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size and the
        # number of batches is not a multiple of num_processes.
__lowercase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowercase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [[[0, 1]], []]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [[], []]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__lowercase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowercase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowercase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [[[0, 1]], []]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [[], []]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__lowercase = [BatchSamplerShard(_UpperCAmelCase , 2 , _UpperCAmelCase , even_batches=_UpperCAmelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a__ ( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Dict=False ) -> Any:
"""simple docstring"""
random.seed(_UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
__lowercase = [
IterableDatasetShard(
_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=_UpperCAmelCase , num_processes=_UpperCAmelCase , process_index=_UpperCAmelCase , split_batches=_UpperCAmelCase , )
for i in range(_UpperCAmelCase )
]
__lowercase = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(_UpperCAmelCase )
iterable_dataset_lists.append(list(_UpperCAmelCase ) )
__lowercase = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
__lowercase = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
self.assertTrue(len(_UpperCAmelCase ) % shard_batch_size == 0 )
__lowercase = []
for idx in range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(_UpperCAmelCase ) < len(_UpperCAmelCase ):
reference += reference
self.assertListEqual(_UpperCAmelCase , reference[: len(_UpperCAmelCase )] )
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = 42
__lowercase = RandomIterableDataset()
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
# Edge case with a very small dataset
__lowercase = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = BatchSampler(range(16 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = SkipBatchSampler(_UpperCAmelCase , 2 )
self.assertListEqual(list(_UpperCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = DataLoader(list(range(16 ) ) , batch_size=4 )
__lowercase = skip_first_batches(_UpperCAmelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(_UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
Accelerator()
__lowercase = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(_UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
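# The interleaving asserted throughout this file, in miniature (following the
# 20-element case above): with 2 processes, batch_size=3 and range(8),
# BatchSamplerShard alternates batches between processes and, with
# even_batches, wraps around to the start to keep both shards the same length:
#
#   process 0: [0, 1, 2], [6, 7, 0]
#   process 1: [3, 4, 5], [1, 2, 3]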
| 704 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
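# Usage sketch for the converted checkpoint (hedged: "./tapas_sqa" and the
# example table/question are illustrative stand-ins, not values from this script).
import pandas as pd
from transformers import TapasForQuestionAnswering, TapasTokenizer

tokenizer = TapasTokenizer.from_pretrained("./tapas_sqa")
model = TapasForQuestionAnswering.from_pretrained("./tapas_sqa")

table = pd.DataFrame({"Actors": ["Brad Pitt", "Leonardo Di Caprio"], "Age": ["56", "45"]})
inputs = tokenizer(table=table, queries=["How old is Brad Pitt?"], return_tensors="pt")
outputs = model(**inputs)  # cell-selection logits (plus aggregation, task permitting)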
| 688 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
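# Standalone sketch of the same subcommand pattern (the "greet" command is a
# made-up example, not part of accelerate): each subcommand registers a `func`
# default, which is why main() dispatches on hasattr(args, "func").
import argparse

demo_parser = argparse.ArgumentParser("demo")
demo_subcommands = demo_parser.add_subparsers(title="subcommands", dest="subcommand")
greet = demo_subcommands.add_parser("greet")
greet.add_argument("--name", default="world")
greet.set_defaults(func=lambda a: print(f"hello {a.name}"))

demo_args = demo_parser.parse_args(["greet", "--name", "accelerate"])
demo_args.func(demo_args)  # prints "hello accelerate"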
| 705 |
def or_gate(input_a: int, input_b: int) -> int:
    return int((input_a, input_b).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
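# Companion gates in the same tuple-count style (an illustrative extension,
# not in the original file):
def and_gate(input_a: int, input_b: int) -> int:
    return int((input_a, input_b).count(0) == 0)


def nor_gate(input_a: int, input_b: int) -> int:
    return int(or_gate(input_a, input_b) == 0)


assert and_gate(1, 1) == 1 and and_gate(1, 0) == 0
assert nor_gate(0, 0) == 1 and nor_gate(1, 0) == 0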
| 688 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class A__ :
lowerCAmelCase__ : CommonSchedulerState
# setable values
lowerCAmelCase__ : jnp.ndarray
lowerCAmelCase__ : jnp.ndarray
lowerCAmelCase__ : Optional[int] = None
@classmethod
def a__ ( cls : List[str] , _UpperCAmelCase : CommonSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray ) -> Optional[int]:
"""simple docstring"""
return cls(common=_UpperCAmelCase , init_noise_sigma=_UpperCAmelCase , timesteps=_UpperCAmelCase )
@dataclass
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : DDPMSchedulerState
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[Any] = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCAmelCase__ : jnp.dtype
@property
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
return True
@register_to_config
def __init__( self : Dict , _UpperCAmelCase : int = 10_00 , _UpperCAmelCase : float = 0.0_001 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : str = "linear" , _UpperCAmelCase : Optional[jnp.ndarray] = None , _UpperCAmelCase : str = "fixed_small" , _UpperCAmelCase : bool = True , _UpperCAmelCase : str = "epsilon" , _UpperCAmelCase : jnp.dtype = jnp.floataa , ) -> Tuple:
"""simple docstring"""
__lowercase = dtype
def a__ ( self : Tuple , _UpperCAmelCase : Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
"""simple docstring"""
if common is None:
__lowercase = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__lowercase = jnp.array(1.0 , dtype=self.dtype )
__lowercase = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_UpperCAmelCase , init_noise_sigma=_UpperCAmelCase , timesteps=_UpperCAmelCase , )
def a__ ( self : List[str] , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : Optional[int] = None ) -> jnp.ndarray:
"""simple docstring"""
return sample
def a__ ( self : Optional[Any] , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : int , _UpperCAmelCase : Tuple = () ) -> DDPMSchedulerState:
"""simple docstring"""
__lowercase = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__lowercase = (jnp.arange(0 , _UpperCAmelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase , )
def a__ ( self : Tuple , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Dict=None ) -> Any:
"""simple docstring"""
__lowercase = state.common.alphas_cumprod[t]
__lowercase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__lowercase = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__lowercase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__lowercase = jnp.clip(_UpperCAmelCase , a_min=1e-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__lowercase = jnp.log(jnp.clip(_UpperCAmelCase , a_min=1e-2_0 ) )
elif variance_type == "fixed_large":
__lowercase = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__lowercase = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__lowercase = variance
__lowercase = state.common.betas[t]
__lowercase = (predicted_variance + 1) / 2
__lowercase = frac * max_log + (1 - frac) * min_log
return variance
def a__ ( self : Tuple , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : Optional[jax.random.KeyArray] = None , _UpperCAmelCase : bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
"""simple docstring"""
__lowercase = timestep
if key is None:
__lowercase = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__lowercase , __lowercase = jnp.split(_UpperCAmelCase , sample.shape[1] , axis=1 )
else:
__lowercase = None
# 1. compute alphas, betas
__lowercase = state.common.alphas_cumprod[t]
__lowercase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__lowercase = 1 - alpha_prod_t
__lowercase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__lowercase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__lowercase = model_output
elif self.config.prediction_type == "v_prediction":
__lowercase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
' for the FlaxDDPMScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__lowercase = jnp.clip(_UpperCAmelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowercase = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__lowercase = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowercase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__lowercase = jax.random.split(_UpperCAmelCase , num=1 )
__lowercase = jax.random.normal(_UpperCAmelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_UpperCAmelCase , _UpperCAmelCase , predicted_variance=_UpperCAmelCase ) ** 0.5) * noise
__lowercase = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__lowercase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_UpperCAmelCase , state=_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , ) -> jnp.ndarray:
"""simple docstring"""
return add_noise_common(state.common , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Tuple , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , ) -> jnp.ndarray:
"""simple docstring"""
return get_velocity_common(state.common , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def __len__( self : int ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
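# Numeric sketch of the posterior coefficients computed in step() above
# (formula (7) of the DDPM paper), in NumPy for clarity; the linear beta
# schedule and t = 500 are arbitrary assumptions for illustration.
import numpy as np

betas = np.linspace(1e-4, 0.02, 1000)
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)

t = 500
alpha_prod_t = alphas_cumprod[t]
alpha_prod_t_prev = alphas_cumprod[t - 1]
beta_prod_t = 1.0 - alpha_prod_t

pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * betas[t]) / beta_prod_t
current_sample_coeff = alphas[t] ** 0.5 * (1.0 - alpha_prod_t_prev) / beta_prod_t
print(pred_original_sample_coeff, current_sample_coeff)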
| 706 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"


def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase = xmod_sent_encoder.embed_tokens.weight
__lowercase = xmod_sent_encoder.embed_positions.weight
__lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowercase = xmod_sent_encoder.layernorm_embedding.weight
__lowercase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase = model.roberta.encoder.layer[i]
__lowercase = xmod_sent_encoder.layers[i]
# self attention
__lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowercase = xmod_layer.self_attn.q_proj.weight
__lowercase = xmod_layer.self_attn.q_proj.bias
__lowercase = xmod_layer.self_attn.k_proj.weight
__lowercase = xmod_layer.self_attn.k_proj.bias
__lowercase = xmod_layer.self_attn.v_proj.weight
__lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowercase = xmod_layer.self_attn.out_proj.weight
__lowercase = xmod_layer.self_attn.out_proj.bias
__lowercase = xmod_layer.self_attn_layer_norm.weight
__lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowercase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
# output
__lowercase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
__lowercase = xmod_layer.final_layer_norm.weight
__lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowercase = xmod_layer.adapter_layer_norm.weight
__lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowercase = bert_output.adapter_modules[lang_code]
__lowercase = xmod_layer.adapter_modules[lang_code]
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowercase = xmod_sent_encoder.layer_norm.weight
__lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].dense.weight
__lowercase = xmod.model.classification_heads['mnli'].dense.bias
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight
__lowercase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowercase = xmod.model.encoder.lm_head.dense.weight
__lowercase = xmod.model.encoder.lm_head.dense.bias
__lowercase = xmod.model.encoder.lm_head.layer_norm.weight
__lowercase = xmod.model.encoder.lm_head.layer_norm.bias
__lowercase = xmod.model.encoder.lm_head.weight
__lowercase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
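# Usage sketch for the converted model (hedged: "./xmod_base" stands in for
# whatever --pytorch_dump_folder_path pointed at; the public facebook/xmod-base
# tokenizer is assumed compatible here).
import torch
from transformers import AutoTokenizer, XmodModel

xmod_tokenizer = AutoTokenizer.from_pretrained("facebook/xmod-base")
xmod_model = XmodModel.from_pretrained("./xmod_base")
xmod_model.set_default_language("en_XX")  # X-MOD needs a language adapter selected

encoded = xmod_tokenizer("Hello, World!", return_tensors="pt")
with torch.no_grad():
    hidden = xmod_model(**encoded).last_hidden_state
print(hidden.shape)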
| 688 | 0 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
__lowercase = AutoTokenizer.from_pretrained('xlm-roberta-base' )
__lowercase = 'The dog is cute and lives in the garden house'
__lowercase = jnp.array([tokenizer.encode(_UpperCAmelCase )] )
__lowercase = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim
__lowercase = jnp.array(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
__lowercase = model(_UpperCAmelCase )['last_hidden_state']
self.assertEqual(output.shape , _UpperCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , _UpperCAmelCase , atol=1e-3 ) )
| 707 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """
    Evaluate u * (u - 1) * ... * (u - p + 1), the factor multiplying the
    p-th forward difference in Newton's forward interpolation formula.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)

    print("enter the values of parameters in a list: ")
    x = list(map(float, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
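# Non-interactive sketch of the same method (the cubic data below is a
# made-up example): four equally spaced samples of y = x**3, evaluated at 1.5.
def newton_forward(x, y0, value):
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i in range(n):
        table[i][0] = y0[i]
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    total = table[0][0]
    for i in range(1, n):
        total += ucal(u, i) * table[0][i] / math.factorial(i)
    return total


print(newton_forward([1.0, 2.0, 3.0, 4.0], [1.0, 8.0, 27.0, 64.0], 1.5))  # 3.375 exactly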
| 688 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip('-'): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        'HuggingFace Datasets CLI tool', usage='datasets-cli <command> [<args>]', allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help='datasets-cli command helpers')
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown = parser.parse_known_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
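# Hedged illustration of the parse_unknown_args helper above: flags that no
# subcommand declared are zipped pairwise into a plain kwargs dict
# (values stay strings; the flag names here are invented).
extra = ["--num_proc", "8", "--cache_dir", "/tmp/hf"]
print(parse_unknown_args(extra))  # {'num_proc': '8', 'cache_dir': '/tmp/hf'}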
| 708 |
def catalan(number: int) -> int:
    """
    Calculate the nth Catalan number (1-indexed).

    >>> catalan(5)
    14
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
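# Cross-check sketch (an addition for illustration): with the 1-indexed
# convention above, catalan(n) equals the usual closed form C(2k, k) // (k + 1)
# at k = n - 1.
import math


def catalan_closed_form(k: int) -> int:
    return math.comb(2 * k, k) // (k + 1)


assert all(catalan(n) == catalan_closed_form(n - 1) for n in range(1, 10))
print([catalan(n) for n in range(1, 8)])  # [1, 1, 2, 5, 14, 42, 132]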
| 688 | 0 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal part of `number`, rounded to `digit_amount` digits
    when digit_amount > 0.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
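# Standard-library alternative for comparison (not the original approach):
# math.modf splits a float into fractional and integer parts with the same
# sign convention as the int() truncation used above.
import math

frac, _whole = math.modf(-14.789)
print(round(frac, 3))  # -0.789, matching decimal_isolate(-14.789, 3)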
| 709 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers')

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 688 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class A__ :
def __init__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Node | None , ) -> None:
"""simple docstring"""
__lowercase = pos_x
__lowercase = pos_y
__lowercase = (pos_y, pos_x)
__lowercase = goal_x
__lowercase = goal_y
__lowercase = g_cost
__lowercase = parent
__lowercase = self.calculate_heuristic()
__lowercase = self.g_cost + self.h_cost
def a__ ( self : List[Any] ) -> float:
"""simple docstring"""
__lowercase = self.pos_x - self.goal_x
__lowercase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_UpperCAmelCase ) + abs(_UpperCAmelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : List[Any] , _UpperCAmelCase : Node ) -> bool:
"""simple docstring"""
return self.f_cost < other.f_cost
class A__ :
def __init__( self : Tuple , _UpperCAmelCase : TPosition , _UpperCAmelCase : TPosition ) -> Optional[Any]:
"""simple docstring"""
__lowercase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _UpperCAmelCase )
__lowercase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _UpperCAmelCase )
__lowercase = [self.start]
__lowercase = []
__lowercase = False
def a__ ( self : Tuple ) -> list[TPosition]:
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowercase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(_UpperCAmelCase )
self.closed_nodes.append(_UpperCAmelCase )
__lowercase = self.get_successors(_UpperCAmelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_UpperCAmelCase )
else:
# retrieve the best current path
__lowercase = self.open_nodes.pop(self.open_nodes.index(_UpperCAmelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_UpperCAmelCase )
else:
self.open_nodes.append(_UpperCAmelCase )
return [self.start.pos]
def a__ ( self : List[str] , _UpperCAmelCase : Node ) -> list[Node]:
"""simple docstring"""
__lowercase = []
for action in delta:
__lowercase = parent.pos_x + action[1]
__lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_UpperCAmelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_UpperCAmelCase , _UpperCAmelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _UpperCAmelCase , ) )
return successors
def a__ ( self : Optional[int] , _UpperCAmelCase : Node | None ) -> list[TPosition]:
"""simple docstring"""
__lowercase = node
__lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowercase = current_node.parent
path.reverse()
return path
class A__ :
def __init__( self : List[str] , _UpperCAmelCase : TPosition , _UpperCAmelCase : TPosition ) -> None:
"""simple docstring"""
__lowercase = AStar(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = AStar(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = False
def a__ ( self : List[Any] ) -> list[TPosition]:
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowercase = self.fwd_astar.open_nodes.pop(0 )
__lowercase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_UpperCAmelCase , _UpperCAmelCase )
self.fwd_astar.closed_nodes.append(_UpperCAmelCase )
self.bwd_astar.closed_nodes.append(_UpperCAmelCase )
__lowercase = current_bwd_node
__lowercase = current_fwd_node
__lowercase = {
self.fwd_astar: self.fwd_astar.get_successors(_UpperCAmelCase ),
self.bwd_astar: self.bwd_astar.get_successors(_UpperCAmelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_UpperCAmelCase )
else:
# retrieve the best current path
__lowercase = astar.open_nodes.pop(
astar.open_nodes.index(_UpperCAmelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_UpperCAmelCase )
else:
astar.open_nodes.append(_UpperCAmelCase )
return [self.fwd_astar.start.pos]
def a__ ( self : Dict , _UpperCAmelCase : Node , _UpperCAmelCase : Node ) -> list[TPosition]:
"""simple docstring"""
__lowercase = self.fwd_astar.retrace_path(_UpperCAmelCase )
__lowercase = self.bwd_astar.retrace_path(_UpperCAmelCase )
bwd_path.pop()
bwd_path.reverse()
__lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bd_astar = BidirectionalAStar(init, goal)
    bd_path = bd_astar.search()  # run the search so the timing below measures actual work
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
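# Small numeric sketch of the two heuristics the HEURISTIC flag selects
# (standalone helpers, using the same (y, x) convention as Node):
def manhattan(a: TPosition, b: TPosition) -> float:
    return abs(a[0] - b[0]) + abs(a[1] - b[1])


def euclidean(a: TPosition, b: TPosition) -> float:
    return sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)


print(manhattan((0, 0), (6, 6)), euclidean((0, 0), (6, 6)))  # 12 vs ~8.49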
| 710 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = ProphetNetTokenizer
lowerCAmelCase__ : str = False
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : str , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = 'UNwant\u00E9d,running'
__lowercase = 'unwanted, running'
return input_text, output_text
def a__ ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowercase = {}
for i, token in enumerate(_UpperCAmelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowercase = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
__lowercase = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a__ ( self : int ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
| 688 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "blip_2_vision_model"
def __init__( self : int , _UpperCAmelCase : Dict=14_08 , _UpperCAmelCase : Union[str, Any]=61_44 , _UpperCAmelCase : List[Any]=39 , _UpperCAmelCase : str=16 , _UpperCAmelCase : Any=2_24 , _UpperCAmelCase : int=14 , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : str=0.00_001 , _UpperCAmelCase : List[Any]=0.0 , _UpperCAmelCase : Any=1e-1_0 , _UpperCAmelCase : Dict=True , **_UpperCAmelCase : int , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_range
__lowercase = attention_dropout
__lowercase = layer_norm_eps
__lowercase = hidden_act
__lowercase = qkv_bias
@classmethod
def a__ ( cls : int , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Union[str, Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase )
__lowercase , __lowercase = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
__lowercase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = "blip_2_qformer"
def __init__( self : Dict , _UpperCAmelCase : Optional[int]=3_05_22 , _UpperCAmelCase : Optional[Any]=7_68 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : int=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Any=5_12 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1e-1_2 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : List[str]="absolute" , _UpperCAmelCase : Any=2 , _UpperCAmelCase : str=14_08 , **_UpperCAmelCase : Dict , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = cross_attention_frequency
__lowercase = encoder_hidden_size
@classmethod
def a__ ( cls : Optional[int] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Any ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase )
__lowercase , __lowercase = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
__lowercase = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Any = "blip-2"
lowerCAmelCase__ : int = True
def __init__( self : int , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Optional[int]=32 , **_UpperCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__lowercase = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
__lowercase = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
__lowercase = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__lowercase = BlipaVisionConfig(**_UpperCAmelCase )
__lowercase = BlipaQFormerConfig(**_UpperCAmelCase )
__lowercase = text_config['model_type'] if 'model_type' in text_config else 'opt'
__lowercase = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__lowercase = self.text_config.tie_word_embeddings
__lowercase = self.text_config.is_encoder_decoder
__lowercase = num_query_tokens
__lowercase = self.vision_config.hidden_size
__lowercase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__lowercase = 1.0
__lowercase = 0.02
@classmethod
def a__ ( cls : Union[str, Any] , _UpperCAmelCase : BlipaVisionConfig , _UpperCAmelCase : BlipaQFormerConfig , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : List[Any] , ) -> str:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.vision_config.to_dict()
__lowercase = self.qformer_config.to_dict()
__lowercase = self.text_config.to_dict()
__lowercase = self.__class__.model_type
return output
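# Hedged usage sketch against the upstream transformers API this class mirrors
# (Blip2Config / Blip2VisionConfig are the public names; image_size=364 is an
# arbitrary override for illustration):
from transformers import Blip2Config, Blip2VisionConfig

blip2_cfg = Blip2Config(vision_config=Blip2VisionConfig(image_size=364).to_dict(), num_query_tokens=32)
print(blip2_cfg.vision_config.image_size, blip2_cfg.num_query_tokens)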
| 711 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""jukebox""": 512,
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES
lowerCAmelCase__ : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=["v3", "v2", "v2"] , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Union[str, Any]="<|endoftext|>" , **_UpperCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
super().__init__(
unk_token=_UpperCAmelCase , n_genres=_UpperCAmelCase , version=_UpperCAmelCase , max_n_lyric_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = version
__lowercase = max_n_lyric_tokens
__lowercase = n_genres
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
__lowercase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
__lowercase = oov.replace(R'\-\'' , R'\-+\'' )
__lowercase = regex.compile(_UpperCAmelCase )
__lowercase = {v: k for k, v in self.artists_encoder.items()}
__lowercase = {v: k for k, v in self.genres_encoder.items()}
__lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = [self.artists_encoder.get(_UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(_UpperCAmelCase ) ):
__lowercase = [self.genres_encoder.get(_UpperCAmelCase , 0 ) for genre in list_genres[genres]]
__lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__lowercase = [[self.lyrics_encoder.get(_UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def a__ ( self : str , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
return list(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = self.prepare_for_tokenization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = self._tokenize(_UpperCAmelCase )
return artist, genre, lyrics
def a__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__lowercase = artists[idx].lower()
__lowercase = [genres[idx].lower()]
else:
__lowercase = self._normalize(artists[idx] ) + '.v2'
__lowercase = [
self._normalize(_UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
__lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
__lowercase = {vocab[index]: index + 1 for index in range(len(_UpperCAmelCase ) )}
__lowercase = 0
__lowercase = len(_UpperCAmelCase ) + 1
__lowercase = self.vocab
__lowercase = {v: k for k, v in self.vocab.items()}
__lowercase = ''
else:
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
__lowercase = self._run_strip_accents(_UpperCAmelCase )
__lowercase = lyrics.replace('\\' , '\n' )
__lowercase = self.out_of_vocab.sub('' , _UpperCAmelCase ), [], []
return artists, genres, lyrics
def a__ ( self : Tuple , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = unicodedata.normalize('NFD' , _UpperCAmelCase )
__lowercase = []
for char in text:
__lowercase = unicodedata.category(_UpperCAmelCase )
if cat == "Mn":
continue
output.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = (
[chr(_UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
__lowercase = frozenset(_UpperCAmelCase )
__lowercase = re.compile(R'_+' )
__lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] )
__lowercase = pattern.sub('_' , _UpperCAmelCase ).strip('_' )
return text
def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return " ".join(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = False ) -> int:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = TensorType(_UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
__lowercase = tf.constant
__lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
__lowercase = torch.tensor
__lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
__lowercase = jnp.array
__lowercase = _is_jax
else:
__lowercase = np.asarray
__lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__lowercase = [inputs]
if not is_tensor(_UpperCAmelCase ):
__lowercase = as_tensor(_UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int="" , _UpperCAmelCase : Tuple="pt" ) -> BatchEncoding:
"""simple docstring"""
__lowercase = [0, 0, 0]
__lowercase = [artist] * len(self.version )
__lowercase = [genres] * len(self.version )
__lowercase , __lowercase , __lowercase = self.tokenize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase , __lowercase = self._convert_token_to_id(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = [-INFINITY] * len(full_tokens[-1] )
__lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def a__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.artists_decoder.get(_UpperCAmelCase )
__lowercase = [self.genres_decoder.get(_UpperCAmelCase ) for genre in genres_index]
__lowercase = [self.lyrics_decoder.get(_UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
| 688 | 0 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[str] , _UpperCAmelCase : str="" , _UpperCAmelCase : int="train" ) -> Tuple:
"""simple docstring"""
assert os.path.isdir(_UpperCAmelCase )
__lowercase = []
__lowercase = os.listdir(_UpperCAmelCase )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
__lowercase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not os.path.isfile(_UpperCAmelCase ):
continue
self.documents.append(_UpperCAmelCase )
def __len__( self : List[str] ) -> List[str]:
"""simple docstring"""
return len(self.documents )
def __getitem__( self : int , _UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = self.documents[idx]
__lowercase = document_path.split('/' )[-1]
with open(_UpperCAmelCase , encoding='utf-8' ) as source:
__lowercase = source.read()
__lowercase , __lowercase = process_story(_UpperCAmelCase )
return document_name, story_lines, summary_lines
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any ) -> Dict:
__lowercase = list(filter(lambda SCREAMING_SNAKE_CASE : len(SCREAMING_SNAKE_CASE ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
# for some unknown reason some lines miss a period, add it
__lowercase = [_add_missing_period(SCREAMING_SNAKE_CASE ) for line in nonempty_lines]
# gather article lines
__lowercase = []
__lowercase = deque(SCREAMING_SNAKE_CASE )
while True:
try:
__lowercase = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(SCREAMING_SNAKE_CASE )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
__lowercase = list(filter(lambda SCREAMING_SNAKE_CASE : not t.startswith('@highlight' ) , SCREAMING_SNAKE_CASE ) )
return story_lines, summary_lines
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> str:
__lowercase = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
if len(SCREAMING_SNAKE_CASE ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(SCREAMING_SNAKE_CASE )) )
return sequence
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ) -> List[Any]:
__lowercase = torch.ones_like(SCREAMING_SNAKE_CASE )
__lowercase = sequence == pad_token_id
__lowercase = 0
return mask
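# Standalone sketch of the truncate/pad + mask pair defined above
# (illustrative names only; not part of the original module):
import torch  # already imported at the top of this module

def _demo_pad_and_mask(ids: list, block_size: int, pad_token_id: int) -> tuple:
    ids = ids[:block_size] + [pad_token_id] * max(0, block_size - len(ids))
    sequence = torch.tensor(ids)
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0  # zero out padding positions
    return sequence, mask

# _demo_pad_and_mask([5, 6, 7], 5, 0) -> (tensor([5, 6, 7, 0, 0]), tensor([1, 1, 1, 0, 0]))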
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
    __lowercase = [tokenizer.encode(line ) for line in story_lines]
__lowercase = [token for sentence in story_lines_token_ids for token in sentence]
    __lowercase = [tokenizer.encode(line ) for line in summary_lines]
__lowercase = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
__lowercase = []
for sequence in batch:
__lowercase = -1
__lowercase = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(SCREAMING_SNAKE_CASE )
return torch.tensor(SCREAMING_SNAKE_CASE )
| 712 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[Any] = True
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = AlbertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : int ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = AlbertModel.from_pretrained('albert-base-v2' )
__lowercase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
| 688 | 0 |
from __future__ import annotations
from math import ceil, floor, sqrt
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int = 2000000 ) -> int:
__lowercase = [0]
__lowercase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__lowercase = 0
# the area corresponding to the grid that gives the product closest to target
__lowercase = 0
# an estimate of b, using the quadratic formula
__lowercase = 42
# the largest integer less than b_estimate
__lowercase = 42
    # the smallest integer greater than b_estimate
__lowercase = 42
# the triangle number corresponding to b_floor
__lowercase = 42
# the triangle number corresponding to b_ceil
__lowercase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__lowercase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__lowercase = floor(SCREAMING_SNAKE_CASE )
__lowercase = ceil(SCREAMING_SNAKE_CASE )
__lowercase = triangle_numbers[b_floor]
__lowercase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__lowercase = triangle_b_first_guess * triangle_a
__lowercase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__lowercase = triangle_b_second_guess * triangle_a
__lowercase = idx_a * b_ceil
return area
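# Helper added for illustration only: the identity the search above relies
# on is that an a x b grid contains T(a) * T(b) rectangles, where
# T(n) = n * (n + 1) // 2 is the n-th triangle number.
def _rectangles_in_grid(a: int, b: int) -> int:
    return (a * (a + 1) // 2) * (b * (b + 1) // 2)

# e.g. the classic 3 x 2 grid contains 6 * 3 = 18 rectangles:
assert _rectangles_in_grid(3, 2) == 18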
if __name__ == "__main__":
print(F'''{solution() = }''')
| 713 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A__ ( BaseTokenizer ):
def __init__( self : List[str] , _UpperCAmelCase : str = "▁" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[str, AddedToken] = "<unk>" , _UpperCAmelCase : Union[str, AddedToken] = "</s>" , _UpperCAmelCase : Union[str, AddedToken] = "<pad>" , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['token']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=_UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__lowercase = TemplateProcessing(
single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
__lowercase = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> str:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [files]
self._tokenizer.train(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : int , _UpperCAmelCase : Union[Iterator[str], Iterator[Iterator[str]]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> Dict:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
self._tokenizer.train_from_iterator(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['unk']['id']
__lowercase = Tokenizer.from_str(json.dumps(_UpperCAmelCase ) )
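# Self-contained sketch of the same recipe written directly against the
# tokenizers API (illustration only: `corpus.txt` is a hypothetical file,
# and the `unk_token` keyword on UnigramTrainer is assumed from recent
# tokenizers releases). All names used here are imported at the top of
# this module.
def _demo_train_unigram(files: List[str], vocab_size: int = 8_000) -> Tokenizer:
    tok = Tokenizer(Unigram())
    tok.normalizer = normalizers.Sequence(
        [normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(' {2,}'), ' '), normalizers.Lowercase()]
    )
    tok.pre_tokenizer = pre_tokenizers.Metaspace(replacement='▁', add_prefix_space=True)
    trainer = trainers.UnigramTrainer(
        vocab_size=vocab_size, special_tokens=['<pad>', '</s>', '<unk>'], unk_token='<unk>'
    )
    tok.train(files, trainer=trainer)
    return tok

# Usage (hypothetical corpus path):
#     tok = _demo_train_unigram(['corpus.txt'])
#     tok.encode('Hello world').ids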
| 688 | 0 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE__ = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any=8 ) -> Optional[int]:
__lowercase = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
__lowercase = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
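# Worked example of the sizing rule above, restated standalone (illustration
# only): each latent cell covers scale_factor**2 pixels per side, and any
# remainder rounds the latent grid up so the output covers the request.
def _demo_new_hw(h: int, w: int, scale_factor: int = 8) -> tuple:
    new_h = h // scale_factor**2 + (h % scale_factor**2 != 0)
    new_w = w // scale_factor**2 + (w % scale_factor**2 != 0)
    return new_h * scale_factor, new_w * scale_factor

assert _demo_new_hw(768, 768) == (96, 96)    # 768 // 64 = 12, then 12 * 8
assert _demo_new_hw(769, 768) == (104, 96)   # a remainder rounds up to 13 * 8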
class A__ ( DiffusionPipeline ):
def __init__( self : List[str] , _UpperCAmelCase : MultilingualCLIP , _UpperCAmelCase : XLMRobertaTokenizer , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , _UpperCAmelCase : VQModel , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , movq=_UpperCAmelCase , )
__lowercase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def a__ ( self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ) -> Tuple:
"""simple docstring"""
if latents is None:
__lowercase = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
__lowercase = latents.to(_UpperCAmelCase )
__lowercase = latents * scheduler.init_noise_sigma
return latents
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = len(_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else 1
# get prompt text embeddings
__lowercase = self.tokenizer(
_UpperCAmelCase , padding='max_length' , truncation=_UpperCAmelCase , max_length=77 , return_attention_mask=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors='pt' , )
__lowercase = text_inputs.input_ids
__lowercase = self.tokenizer(_UpperCAmelCase , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__lowercase = text_input_ids.to(_UpperCAmelCase )
__lowercase = text_inputs.attention_mask.to(_UpperCAmelCase )
__lowercase , __lowercase = self.text_encoder(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
__lowercase = prompt_embeds.repeat_interleave(_UpperCAmelCase , dim=0 )
__lowercase = text_encoder_hidden_states.repeat_interleave(_UpperCAmelCase , dim=0 )
__lowercase = text_mask.repeat_interleave(_UpperCAmelCase , dim=0 )
if do_classifier_free_guidance:
__lowercase = 42
if negative_prompt is None:
__lowercase = [''] * batch_size
elif type(_UpperCAmelCase ) is not type(_UpperCAmelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(_UpperCAmelCase )} !="""
f""" {type(_UpperCAmelCase )}.""" )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [negative_prompt]
elif batch_size != len(_UpperCAmelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(_UpperCAmelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
' the batch size of `prompt`.' )
else:
__lowercase = negative_prompt
__lowercase = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=77 , truncation=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors='pt' , )
__lowercase = uncond_input.input_ids.to(_UpperCAmelCase )
__lowercase = uncond_input.attention_mask.to(_UpperCAmelCase )
__lowercase , __lowercase = self.text_encoder(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowercase = negative_prompt_embeds.shape[1]
__lowercase = negative_prompt_embeds.repeat(1 , _UpperCAmelCase )
__lowercase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _UpperCAmelCase )
__lowercase = uncond_text_encoder_hidden_states.shape[1]
__lowercase = uncond_text_encoder_hidden_states.repeat(1 , _UpperCAmelCase , 1 )
__lowercase = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _UpperCAmelCase , -1 )
__lowercase = uncond_text_mask.repeat_interleave(_UpperCAmelCase , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([negative_prompt_embeds, prompt_embeds] )
__lowercase = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__lowercase = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def a__ ( self : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> List[str]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowercase = torch.device(f"""cuda:{gpu_id}""" )
__lowercase = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : List[str]=0 ) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__lowercase = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowercase = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__lowercase , __lowercase = cpu_offload_with_hook(_UpperCAmelCase , _UpperCAmelCase , prev_module_hook=_UpperCAmelCase )
if self.safety_checker is not None:
__lowercase , __lowercase = cpu_offload_with_hook(self.safety_checker , _UpperCAmelCase , prev_module_hook=_UpperCAmelCase )
# We'll offload the last model manually.
__lowercase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a__ ( self : Any ) -> Any:
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCAmelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(SCREAMING_SNAKE_CASE__ )
def __call__( self : Optional[int] , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _UpperCAmelCase : Optional[Union[str, List[str]]] = None , _UpperCAmelCase : int = 5_12 , _UpperCAmelCase : int = 5_12 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 4.0 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , ) -> str:
"""simple docstring"""
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = 1
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = len(_UpperCAmelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_UpperCAmelCase )}""" )
__lowercase = self._execution_device
__lowercase = batch_size * num_images_per_prompt
__lowercase = guidance_scale > 1.0
__lowercase , __lowercase , __lowercase = self._encode_prompt(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = torch.cat(_UpperCAmelCase , dim=0 )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = torch.cat(_UpperCAmelCase , dim=0 )
if do_classifier_free_guidance:
__lowercase = image_embeds.repeat_interleave(_UpperCAmelCase , dim=0 )
__lowercase = negative_image_embeds.repeat_interleave(_UpperCAmelCase , dim=0 )
__lowercase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_UpperCAmelCase )
self.scheduler.set_timesteps(_UpperCAmelCase , device=_UpperCAmelCase )
__lowercase = self.scheduler.timesteps
__lowercase = self.unet.config.in_channels
__lowercase , __lowercase = get_new_h_w(_UpperCAmelCase , _UpperCAmelCase , self.movq_scale_factor )
# create initial latent
__lowercase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
__lowercase = self.unet(
sample=_UpperCAmelCase , timestep=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , added_cond_kwargs=_UpperCAmelCase , return_dict=_UpperCAmelCase , )[0]
if do_classifier_free_guidance:
__lowercase , __lowercase = noise_pred.split(latents.shape[1] , dim=1 )
__lowercase , __lowercase = noise_pred.chunk(2 )
__lowercase , __lowercase = variance_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowercase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowercase , __lowercase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase , ).prev_sample
# post-processing
__lowercase = self.movq.decode(_UpperCAmelCase , force_not_quantize=_UpperCAmelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__lowercase = image * 0.5 + 0.5
__lowercase = image.clamp(0 , 1 )
__lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCAmelCase )
| 714 |
import string
from math import logaa
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> int:
__lowercase = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
__lowercase = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> tuple[int, int]:
__lowercase = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
__lowercase = corpus_without_punctuation.split('\n' )
__lowercase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE ))
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str]=False ) -> float:
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> float:
return round(tf * idf , 3 )
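# Worked example of the pipeline above, restated standalone (illustration
# only; the obfuscated helpers compute term frequency, document frequency,
# inverse document frequency and tf-idf, top to bottom). A term seen 3
# times in a document and present in 5 of 100 corpus documents scores:
#     idf    = round(log10(100 / 5), 3) = 1.301
#     tf-idf = round(3 * 1.301, 3)      = 3.903
from math import log10 as _demo_log10

assert round(_demo_log10(100 / 5), 3) == 1.301
assert round(3 * 1.301, 3) == 3.903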
| 688 | 0 |
import numpy as np
SCREAMING_SNAKE_CASE__ = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class A__ :
def __init__( self : Dict ) -> None:
"""simple docstring"""
__lowercase = np.array(_UpperCAmelCase )
def a__ ( self : List[str] , _UpperCAmelCase : str ) -> np.ndarray:
"""simple docstring"""
__lowercase , __lowercase = np.where(letter == self.SQUARE )
__lowercase = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def a__ ( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
"""simple docstring"""
__lowercase = self.SQUARE[indexa - 1, indexa - 1]
return letter
def a__ ( self : List[Any] , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = message.lower()
__lowercase = message.replace(' ' , '' )
__lowercase = message.replace('j' , 'i' )
__lowercase = np.empty((2, len(_UpperCAmelCase )) )
for letter_index in range(len(_UpperCAmelCase ) ):
__lowercase = self.letter_to_numbers(message[letter_index] )
__lowercase = numbers[0]
__lowercase = numbers[1]
__lowercase = first_step.reshape(2 * len(_UpperCAmelCase ) )
__lowercase = ''
for numbers_index in range(len(_UpperCAmelCase ) ):
__lowercase = int(second_step[numbers_index * 2] )
__lowercase = int(second_step[(numbers_index * 2) + 1] )
__lowercase = self.numbers_to_letter(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = encoded_message + letter
return encoded_message
def a__ ( self : Any , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = message.lower()
        __lowercase = message.replace(' ' , '' )
__lowercase = np.empty(2 * len(_UpperCAmelCase ) )
for letter_index in range(len(_UpperCAmelCase ) ):
__lowercase = self.letter_to_numbers(message[letter_index] )
__lowercase = numbers[0]
__lowercase = numbers[1]
__lowercase = first_step.reshape((2, len(_UpperCAmelCase )) )
__lowercase = ''
for numbers_index in range(len(_UpperCAmelCase ) ):
__lowercase = int(second_step[0, numbers_index] )
__lowercase = int(second_step[1, numbers_index] )
__lowercase = self.numbers_to_letter(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = decoded_message + letter
return decoded_message
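# Standalone round-trip sketch of the Bifid scheme implemented above (the
# three cipher methods are all obfuscated to the same name here, so the
# encoder is restated for illustration): letters map to (row, column)
# coordinates on the 5x5 square with "j" folded into "i", the row stream
# and column stream are written one after the other, and the re-paired
# digits map back to letters.
import numpy as np  # already imported at the top of this module

_DEMO_SQUARE = np.array(SCREAMING_SNAKE_CASE__)

def _demo_bifid_encode(message: str) -> str:
    msg = message.lower().replace(' ', '').replace('j', 'i')
    rows, cols = [], []
    for ch in msg:
        r, c = np.where(_DEMO_SQUARE == ch)
        rows.append(int(r[0]))
        cols.append(int(c[0]))
    stream = rows + cols  # transposing the coordinate table is the cipher step
    return ''.join(_DEMO_SQUARE[stream[2 * i], stream[2 * i + 1]] for i in range(len(msg)))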
| 715 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class A__ ( PretrainedConfig ):
lowerCAmelCase__ : Optional[int] = "retribert"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=3_05_22 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : List[Any]=8 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=5_12 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=1e-1_2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Optional[int]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = share_encoders
__lowercase = projection_dim
| 688 | 0 |
import json
import sys
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> str:
with open(SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f:
__lowercase = json.load(SCREAMING_SNAKE_CASE )
__lowercase = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
for benchmark_name in sorted(SCREAMING_SNAKE_CASE ):
__lowercase = results[benchmark_name]
__lowercase = benchmark_name.split('/' )[-1]
output_md.append(F"""### Benchmark: {benchmark_file_name}""" )
__lowercase = '| metric |'
__lowercase = '|--------|'
__lowercase = '| new / old (diff) |'
for metric_name in sorted(SCREAMING_SNAKE_CASE ):
__lowercase = benchmark_res[metric_name]
__lowercase = metric_vals['new']
            __lowercase = metric_vals.get('old' , None )
            __lowercase = metric_vals.get('diff' , None )
__lowercase = F""" {new_val:f}""" if isinstance(SCREAMING_SNAKE_CASE , (int, float) ) else 'None'
if old_val is not None:
val_str += F""" / {old_val:f}""" if isinstance(SCREAMING_SNAKE_CASE , (int, float) ) else "None"
if dif_val is not None:
val_str += F""" ({dif_val:f})""" if isinstance(SCREAMING_SNAKE_CASE , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('</details>' )
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.writelines('\n'.join(SCREAMING_SNAKE_CASE ) )
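# Shape of the expected input JSON, inferred from the loop above
# (illustration only): one entry per benchmark file, each metric mapping
# to new/old/diff values, with "old" and "diff" optional.
_EXAMPLE_RESULTS = {
    'benchmarks/speed': {
        'load_time': {'new': 1.2, 'old': 1.5, 'diff': -0.3},
    }
}
# Rendering this yields a "### Benchmark: speed" section whose table row
# contains "1.200000 / 1.500000 (-0.300000)".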
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = sys.argv[1]
SCREAMING_SNAKE_CASE__ = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
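# Standalone sketch of the lazy-module pattern used above (illustration
# only): attribute access triggers the real submodule import on first use,
# so importing the package stays cheap until a symbol is actually needed.
import importlib
import types

class _DemoLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict) -> None:
        super().__init__(name)
        self._demo_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._demo_structure.items():
            if attr in names:
                module = importlib.import_module(f'{self.__name__}.{submodule}')
                return getattr(module, attr)
        raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')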
| 688 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class A__ ( unittest.TestCase , ToolTesterMixin ):
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = load_tool('text-to-speech' )
self.tool.setup()
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = self.tool('hey' )
__lowercase = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = self.tool('hey' )
__lowercase = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 717 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( ProcessorMixin ):
lowerCAmelCase__ : Optional[int] = ["image_processor", "tokenizer"]
lowerCAmelCase__ : Union[str, Any] = "LayoutLMv2ImageProcessor"
lowerCAmelCase__ : Union[str, Any] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[Any] , _UpperCAmelCase : Any=None , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__lowercase = kwargs.pop('feature_extractor' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , _UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : Dict , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
__lowercase = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowercase = features['words']
__lowercase = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__lowercase = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__lowercase = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] )
__lowercase = images
return encoded_inputs
def a__ ( self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f""" {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}""" )
return images_with_overflow
def a__ ( self : Dict , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def a__ ( self : str ) -> Dict:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
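# Standalone sketch of the overflow bookkeeping implemented above
# (illustration only): when the tokenizer splits one page into several
# overflowing windows, the page image is repeated once per window using
# overflow_to_sample_mapping.
def _demo_overflow_images(images: list, overflow_to_sample_mapping: list) -> list:
    return [images[sample_idx] for sample_idx in overflow_to_sample_mapping]

assert _demo_overflow_images(['page0', 'page1'], [0, 0, 1]) == ['page0', 'page0', 'page1']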
| 688 | 0 |
'''simple docstring'''
import inspect
import unittest
class A__ ( unittest.TestCase ):
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
try:
import diffusers # noqa: F401
except ImportError:
assert False
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
import diffusers
from diffusers.dependency_versions_table import deps
        __lowercase = inspect.getmembers(diffusers , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
__lowercase = 'k-diffusion'
elif backend == "invisible_watermark":
__lowercase = 'invisible-watermark'
assert backend in deps, f"""{backend} is not in the deps table!"""
| 718 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class A__ :
lowerCAmelCase__ : Optional[int] = "dummy_data"
lowerCAmelCase__ : str = "datasets"
lowerCAmelCase__ : Dict = False
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[Version, str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[List[Callable]] = None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 0
__lowercase = dataset_name
__lowercase = cache_dir
__lowercase = use_local_dummy_data
__lowercase = config
# download_callbacks take a single url as input
__lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowercase = str(_UpperCAmelCase )
# to be downloaded
__lowercase = None
__lowercase = None
@property
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self._dummy_file is None:
__lowercase = self.download_dummy_data()
return self._dummy_file
@property
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowercase = cached_path(
_UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCAmelCase , force_extract=_UpperCAmelCase )
return os.path.join(_UpperCAmelCase , self.dummy_file_name )
@property
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self._bucket_url is None:
__lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , *_UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return self.create_dummy_data_dict(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(_UpperCAmelCase , _UpperCAmelCase )
else:
return self.create_dummy_data_single(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple , *_UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
return path
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return {}
def a__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for single_url in single_urls:
download_callback(_UpperCAmelCase )
else:
__lowercase = single_urls
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
                __lowercase = [os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
else:
__lowercase = single_urls
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) )
__lowercase = value
# make sure that values are unique
        if all(isinstance(i , str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _UpperCAmelCase ) ) for url in data_url )
__lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowercase = [data_url[0]] * len(_UpperCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(_UpperCAmelCase )
return dummy_data_list
def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(_UpperCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def a__ ( self : int ) -> str:
"""simple docstring"""
pass
def a__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
def _iter_archive_members(_UpperCAmelCase : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__lowercase = Path(self.dummy_file ).parent
__lowercase = path.relative_to(_UpperCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_UpperCAmelCase )
__lowercase = Path(_UpperCAmelCase )
__lowercase = _iter_archive_members(_UpperCAmelCase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(_UpperCAmelCase ).as_posix(), file_path.open('rb' )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [paths]
for path in paths:
if os.path.isfile(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(_UpperCAmelCase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase )
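# Standalone sketch of the URL -> dummy-file mapping the manager above
# applies throughout (illustration only): each URL is reduced to its
# percent-encoded final path component under the dummy folder.
import os
import urllib.parse
from pathlib import Path

def _demo_dummy_path(dummy_root: str, url: str) -> str:
    return os.path.join(dummy_root, urllib.parse.quote_plus(Path(url).name))

assert _demo_dummy_path('dummy', 'https://host/files/train.csv?v=1') == os.path.join('dummy', 'train.csv%3Fv%3D1')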
| 688 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """Hello, World!"""
SCREAMING_SNAKE_CASE__ = """en_XX"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ) -> Optional[int]:
__lowercase = Path('data_bin' )
__lowercase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(SCREAMING_SNAKE_CASE ) , bpe='sentencepiece' , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE )
__lowercase = xmod.model.encoder.sentence_encoder
__lowercase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , SCREAMING_SNAKE_CASE )
__lowercase = XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase = xmod_sent_encoder.embed_tokens.weight
__lowercase = xmod_sent_encoder.embed_positions.weight
__lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowercase = xmod_sent_encoder.layernorm_embedding.weight
__lowercase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase = model.roberta.encoder.layer[i]
__lowercase = xmod_sent_encoder.layers[i]
# self attention
__lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowercase = xmod_layer.self_attn.q_proj.weight
__lowercase = xmod_layer.self_attn.q_proj.bias
__lowercase = xmod_layer.self_attn.k_proj.weight
__lowercase = xmod_layer.self_attn.k_proj.bias
__lowercase = xmod_layer.self_attn.v_proj.weight
__lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowercase = xmod_layer.self_attn.out_proj.weight
__lowercase = xmod_layer.self_attn.out_proj.bias
__lowercase = xmod_layer.self_attn_layer_norm.weight
__lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowercase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
# output
__lowercase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
__lowercase = xmod_layer.final_layer_norm.weight
__lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowercase = xmod_layer.adapter_layer_norm.weight
__lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowercase = bert_output.adapter_modules[lang_code]
__lowercase = xmod_layer.adapter_modules[lang_code]
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowercase = xmod_sent_encoder.layer_norm.weight
__lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].dense.weight
__lowercase = xmod.model.classification_heads['mnli'].dense.bias
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight
__lowercase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowercase = xmod.model.encoder.lm_head.dense.weight
__lowercase = xmod.model.encoder.lm_head.dense.bias
__lowercase = xmod.model.encoder.lm_head.layer_norm.weight
__lowercase = xmod.model.encoder.lm_head.layer_norm.bias
__lowercase = xmod.model.encoder.lm_head.weight
__lowercase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowercase = xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
__lowercase = model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
else:
__lowercase = xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
__lowercase = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
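# A hypothetical invocation of this conversion script; the script name and paths
# below are placeholders, only the flags come from the argument parser above:
#     python convert_xmod_checkpoint.py \
#         --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#         --pytorch_dump_folder_path ./converted-xmod-base \
#         --classification_head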
| 719 |
import math
import sys
import cva
import numpy as np
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float ) -> np.ndarray:
    # Apply the Gaussian probability density function to each element of the matrix.
__lowercase = math.sqrt(SCREAMING_SNAKE_CASE )
__lowercase = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
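# For reference, a readable sketch of the same weighting as above. This is an
# un-obfuscated restatement under the assumption that the two arguments are the
# input array and the Gaussian variance; `sigma` and `cons` are names introduced here.
def _gaussian_weight_sketch(img: np.ndarray, variance: float) -> np.ndarray:
    sigma = math.sqrt(variance)  # standard deviation from variance
    cons = 1 / (sigma * math.sqrt(2 * math.pi))  # normalisation constant of the pdf
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)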
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> np.ndarray:
__lowercase = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float ) -> np.ndarray:
    # Create a Gaussian kernel of the given size: each cell holds its distance from the center, passed through the Gaussian.
__lowercase = np.zeros((kernel_size, kernel_size) )
for i in range(0 , SCREAMING_SNAKE_CASE ):
for j in range(0 , SCREAMING_SNAKE_CASE ):
__lowercase = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
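# Quick spot-check of the kernel builder above, kept as a comment because the
# callable is defined under an obfuscated name (`get_gauss_kernel` is the name
# used at the call site further down):
#     k = get_gauss_kernel(3, 1.0)
#     assert k[1, 1] == k.max()  # the center cell (distance 0) gets the largest weight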
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int , ) -> np.ndarray:
__lowercase = np.zeros(img.shape )
__lowercase = get_gauss_kernel(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase , __lowercase = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__lowercase = get_slice(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = img_s - img_s[kernel_size // 2, kernel_size // 2]
__lowercase = vec_gaussian(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.multiply(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.multiply(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.sum(SCREAMING_SNAKE_CASE ) / np.sum(SCREAMING_SNAKE_CASE )
__lowercase = val
return imga
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list ) -> tuple:
__lowercase = args[1] if args[1:] else '../image_data/lena.jpg'
__lowercase = float(args[2] ) if args[2:] else 1.0
__lowercase = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__lowercase = int(args[4] )
        __lowercase = kernel_size + abs(kernel_size % 2 - 1 )  # round an even size up to the next odd one
else:
__lowercase = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = parse_args(sys.argv)
SCREAMING_SNAKE_CASE__ = cva.imread(filename, 0)
cva.imshow("""input image""", img)
SCREAMING_SNAKE_CASE__ = img / 255
SCREAMING_SNAKE_CASE__ = out.astype("""float32""")
SCREAMING_SNAKE_CASE__ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
SCREAMING_SNAKE_CASE__ = out * 255
SCREAMING_SNAKE_CASE__ = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
| 688 | 0 |
import unittest
from knapsack import knapsack as k
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase = 0
__lowercase = [0]
__lowercase = [0]
__lowercase = len(_UpperCAmelCase )
self.assertEqual(k.knapsack(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , 0 )
__lowercase = [60]
__lowercase = [10]
__lowercase = len(_UpperCAmelCase )
self.assertEqual(k.knapsack(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , 0 )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = 3
__lowercase = [1, 2, 3]
__lowercase = [3, 2, 1]
__lowercase = len(_UpperCAmelCase )
self.assertEqual(k.knapsack(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , 5 )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = 50
__lowercase = [60, 1_00, 1_20]
__lowercase = [10, 20, 30]
__lowercase = len(_UpperCAmelCase )
self.assertEqual(k.knapsack(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , 2_20 )
if __name__ == "__main__":
    unittest.main()
 | 720 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : Dict=30 , _UpperCAmelCase : Tuple=4_00 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Any=True , ) -> Dict:
"""simple docstring"""
__lowercase = size if size is not None else {'height': 18, 'width': 18}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = apply_ocr
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessingTester(self )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'apply_ocr' ) )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
pass
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _UpperCAmelCase )
self.assertIsInstance(encoding.boxes , _UpperCAmelCase )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowercase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__lowercase = Image.open(ds[0]['file'] ).convert('RGB' )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowercase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__lowercase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase )
self.assertListEqual(encoding.boxes , _UpperCAmelCase )
# with apply_OCR = False
__lowercase = LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
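        # Hedged summary of the OCR toggle exercised above (a note, not extra assertions):
        # with apply_ocr=True (the default) the processor runs Tesseract and returns
        # `words` and `boxes` alongside `pixel_values`; with apply_ocr=False only the
        # resized pixel_values tensor of shape (1, 3, 224, 224) comes back.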
| 688 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
SCREAMING_SNAKE_CASE__ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
SCREAMING_SNAKE_CASE__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class A__ :
lowerCAmelCase__ : Optional[str] = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."} , )
lowerCAmelCase__ : Optional[str] = field(default=lowerCAmelCase__ , metadata={"help": "A folder containing the training data."} )
lowerCAmelCase__ : Optional[str] = field(default=lowerCAmelCase__ , metadata={"help": "A folder containing the validation data."} )
lowerCAmelCase__ : Optional[float] = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
lowerCAmelCase__ : int = field(default=32 , metadata={"help": "The size of the square patches to use for masking."} )
lowerCAmelCase__ : float = field(
default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
lowerCAmelCase__ : Optional[int] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCAmelCase__ : Optional[int] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = {}
if self.train_dir is not None:
__lowercase = self.train_dir
if self.validation_dir is not None:
__lowercase = self.validation_dir
__lowercase = data_files if data_files else None
@dataclass
class A__ :
lowerCAmelCase__ : str = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
"checkpoint identifier on the hub. "
"Don't set if you want to train a model from scratch."
)
} , )
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
lowerCAmelCase__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCAmelCase__ : str = field(default=lowerCAmelCase__ , metadata={"help": "Name or path of preprocessor config."} )
lowerCAmelCase__ : bool = field(
default=lowerCAmelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCAmelCase__ : Optional[int] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
)
} , )
lowerCAmelCase__ : Optional[int] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
)
} , )
lowerCAmelCase__ : Optional[int] = field(
default=lowerCAmelCase__ , metadata={"help": "Stride to use for the encoder."} , )
class A__ :
def __init__( self : Tuple , _UpperCAmelCase : Optional[int]=1_92 , _UpperCAmelCase : int=32 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : Optional[int]=0.6 ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = input_size
__lowercase = mask_patch_size
__lowercase = model_patch_size
__lowercase = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('Input size must be divisible by mask patch size' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('Mask patch size must be divisible by model patch size' )
__lowercase = self.input_size // self.mask_patch_size
__lowercase = self.mask_patch_size // self.model_patch_size
__lowercase = self.rand_size**2
__lowercase = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = np.random.permutation(self.token_count )[: self.mask_count]
__lowercase = np.zeros(self.token_count , dtype=_UpperCAmelCase )
__lowercase = 1
__lowercase = mask.reshape((self.rand_size, self.rand_size) )
__lowercase = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
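# Hedged sanity-check sketch for the mask generator above. `MaskGenerator` is the
# name used at the call site inside main(); the class definition itself is obfuscated,
# so this stays a comment rather than executable module-level code:
#     generator = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
#     mask = generator()                      # 1-D tensor of 0/1 flags, one per model patch
#     assert mask.numel() == (192 // 4) ** 2  # (input_size // model_patch_size) ** 2 positions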
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
__lowercase = torch.stack([example['pixel_values'] for example in examples] )
__lowercase = torch.stack([example['mask'] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def __SCREAMING_SNAKE_CASE ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase , __lowercase , __lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mim' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowercase = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
__lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__lowercase = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
__lowercase = ds['train'].train_test_split(data_args.train_val_split )
__lowercase = split['train']
__lowercase = split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.config_name_or_path , **SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(SCREAMING_SNAKE_CASE , 'decoder_type' ):
__lowercase = 'simmim'
# adapt config
__lowercase = model_args.image_size if model_args.image_size is not None else config.image_size
__lowercase = model_args.patch_size if model_args.patch_size is not None else config.patch_size
__lowercase = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
__lowercase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
__lowercase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE )
else:
__lowercase = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
__lowercase = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
__lowercase = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
__lowercase = AutoModelForMaskedImageModeling.from_config(SCREAMING_SNAKE_CASE )
if training_args.do_train:
__lowercase = ds['train'].column_names
else:
__lowercase = ds['validation'].column_names
if data_args.image_column_name is not None:
__lowercase = data_args.image_column_name
elif "image" in column_names:
__lowercase = 'image'
elif "img" in column_names:
__lowercase = 'img'
else:
__lowercase = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
__lowercase = Compose(
[
Lambda(lambda SCREAMING_SNAKE_CASE : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
__lowercase = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(SCREAMING_SNAKE_CASE : int ):
__lowercase = [transforms(SCREAMING_SNAKE_CASE ) for image in examples[image_column_name]]
__lowercase = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__lowercase = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__lowercase = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(SCREAMING_SNAKE_CASE )
# Initialize our trainer
__lowercase = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
__lowercase = None
if training_args.resume_from_checkpoint is not None:
__lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowercase = last_checkpoint
__lowercase = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__lowercase = trainer.evaluate()
trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
__lowercase = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'masked-image-modeling',
'dataset': data_args.dataset_name,
'tags': ['masked-image-modeling'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
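# A hypothetical launch command for this SimMIM-style pretraining script; the model
# type and output directory are placeholders, `cifar10` is the dataset default above:
#     python run_mim.py \
#         --model_type swin \
#         --dataset_name cifar10 \
#         --output_dir ./simmim-pretrain \
#         --do_train --do_eval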
| 721 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "umt5"
lowerCAmelCase__ : Tuple = ["past_key_values"]
def __init__( self : str , _UpperCAmelCase : int=25_01_12 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : List[str]=64 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : str=8 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : str=32 , _UpperCAmelCase : Optional[int]=1_28 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : str=1e-6 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : str="gated-gelu" , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Tuple="T5Tokenizer" , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[str]=0 , _UpperCAmelCase : int=1 , _UpperCAmelCase : List[str]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
is_encoder_decoder=_UpperCAmelCase , tokenizer_class=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_heads
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
__lowercase = self.feed_forward_proj.split('-' )
__lowercase = act_info[-1]
__lowercase = act_info[0] == 'gated'
if len(_UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(_UpperCAmelCase ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__lowercase = 'gelu_new'
@property
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.d_model
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.num_heads
@property
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.num_layers
class A__ ( lowerCAmelCase__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def a__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__lowercase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__lowercase = 'past_encoder_sequence + sequence'
__lowercase = {0: 'batch'}
__lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return 13
@property
def a__ ( self : Dict ) -> float:
"""simple docstring"""
return 5e-4
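    # Hedged instantiation sketch for the configuration above. The class name
    # `UMT5Config` is assumed from model_type = "umt5" and is not defined under
    # that name in this obfuscated file, so the example stays a comment:
    #     config = UMT5Config(d_model=512, num_heads=8, feed_forward_proj="gated-gelu")
    #     # "gated-gelu" is split into a gated flag and the activation "gelu_new"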
| 688 | 0 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class A__ :
@property
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.get_dummy_input()
@property
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def a__ ( self : Optional[Any] , _UpperCAmelCase : int=True , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Any=False , ) -> Dict:
"""simple docstring"""
__lowercase = 4
__lowercase = 32
__lowercase = (32, 32)
__lowercase = torch.manual_seed(0 )
__lowercase = torch.device(_UpperCAmelCase )
__lowercase = (batch_size, num_channels) + sizes
__lowercase = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=_UpperCAmelCase )
__lowercase = {'hidden_states': hidden_states}
if include_temb:
__lowercase = 1_28
__lowercase = randn_tensor((batch_size, temb_channels) , generator=_UpperCAmelCase , device=_UpperCAmelCase )
if include_res_hidden_states_tuple:
__lowercase = torch.manual_seed(1 )
__lowercase = (randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=_UpperCAmelCase ),)
if include_encoder_hidden_states:
__lowercase = floats_tensor((batch_size, 32, 32) ).to(_UpperCAmelCase )
if include_skip_sample:
__lowercase = randn_tensor(((batch_size, 3) + sizes) , generator=_UpperCAmelCase , device=_UpperCAmelCase )
return dummy_input
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 1_28,
}
if self.block_type == "up":
__lowercase = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
__lowercase = self.dummy_input
return init_dict, inputs_dict
def a__ ( self : int , _UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.prepare_init_args_and_inputs_for_common()
__lowercase = self.block_class(**_UpperCAmelCase )
unet_block.to(_UpperCAmelCase )
unet_block.eval()
with torch.no_grad():
__lowercase = unet_block(**_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = output[0]
self.assertEqual(output.shape , self.output_shape )
__lowercase = output[0, -1, -3:, -3:]
__lowercase = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
assert torch_all_close(output_slice.flatten() , _UpperCAmelCase , atol=5e-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.prepare_init_args_and_inputs_for_common()
__lowercase = self.block_class(**_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
__lowercase = model(**_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = output[0]
__lowercase = torch.device(_UpperCAmelCase )
__lowercase = randn_tensor(output.shape , device=_UpperCAmelCase )
__lowercase = torch.nn.functional.mse_loss(_UpperCAmelCase , _UpperCAmelCase )
loss.backward()
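    # Hedged sketch of how a tester mixin like this is typically consumed; the class
    # and block names below are assumptions, not definitions from this file:
    #     class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    #         block_class = UpBlock2D   # the diffusers block under test
    #         block_type = "up"         # selects the expected output shape above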
| 700 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "layoutlmv3"
def __init__( self : Optional[Any] , _UpperCAmelCase : int=5_02_65 , _UpperCAmelCase : Union[str, Any]=7_68 , _UpperCAmelCase : str=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[str]=30_72 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Tuple=1_28 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : int=64 , _UpperCAmelCase : List[str]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=2_24 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = max_ad_position_embeddings
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = has_relative_attention_bias
__lowercase = rel_pos_bins
__lowercase = max_rel_pos
__lowercase = has_spatial_attention_bias
__lowercase = rel_ad_pos_bins
__lowercase = max_rel_ad_pos
__lowercase = text_embed
__lowercase = visual_embed
__lowercase = input_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = classifier_dropout
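    # Hypothetical instantiation sketch; the class name `LayoutLMv3Config` is assumed
    # from model_type = "layoutlmv3", and the keywords mirror the attributes set above:
    #     config = LayoutLMv3Config(input_size=224, patch_size=16, visual_embed=True)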
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = version.parse("1.12" )
@property
def a__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def a__ ( self : Any ) -> float:
"""simple docstring"""
return 1e-5
@property
def a__ ( self : Dict ) -> int:
"""simple docstring"""
return 12
def a__ ( self : Tuple , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
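        # Hedged export-prep sketch for the ONNX config above; the class and processor
        # names are assumptions, and the framework constant comes from TensorType:
        #     onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
        #     dummy = onnx_config.generate_dummy_inputs(
        #         processor, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
        #     )
        #     # -> dict with input_ids, attention_mask, bbox and pixel_values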
| 688 | 0 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = LayoutLMTokenizer
lowerCAmelCase__ : List[Any] = LayoutLMTokenizerFast
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Optional[int] = True
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
super().setUp()
__lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : Any , **_UpperCAmelCase : List[str] ) -> Tuple:
"""simple docstring"""
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : Dict ) -> Dict:
"""simple docstring"""
__lowercase = 'UNwant\u00E9d,running'
__lowercase = 'unwanted, running'
return input_text, output_text
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
pass
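        # Hedged note on the WordPiece behaviour exercised above: with the toy vocab
        # written in setUp, "unwanted" splits into ['un', '##want', '##ed'], and the
        # ids [7, 4, 5, 10, 8, 9] are simply the line positions of those pieces.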
| 701 |
from pathlib import Path
import numpy as np
from PIL import Image
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
__lowercase , __lowercase , __lowercase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b  # ITU-R BT.601 luma weights
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
return (gray > 127) & (gray <= 255)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
__lowercase = np.zeros_like(SCREAMING_SNAKE_CASE )
__lowercase = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
__lowercase = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
__lowercase = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
__lowercase = int(summation > 0 )
return output
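# Tiny worked example of the dilation above, kept as a comment because the function
# is defined under an obfuscated name (it is called as `dilation` in the main block):
#     img = np.zeros((3, 3)); img[1, 1] = 1          # single foreground pixel
#     dilation(img, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
#     # -> the pixel grows into a plus sign: ones at the center and its 4 neighbours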
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE__ = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
SCREAMING_SNAKE_CASE__ = np.array(Image.open(lena_path))
# kernel to be applied
SCREAMING_SNAKE_CASE__ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
SCREAMING_SNAKE_CASE__ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
SCREAMING_SNAKE_CASE__ = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
| 688 | 0 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class A__ ( lowerCAmelCase__ ):
@add_start_docstrings(_UpperCAmelCase )
def __call__( self : List[Any] , _UpperCAmelCase : torch.LongTensor , _UpperCAmelCase : torch.FloatTensor , **_UpperCAmelCase : List[Any] ) -> bool:
"""simple docstring"""
raise NotImplementedError('StoppingCriteria needs to be subclassed' )
class A__ ( lowerCAmelCase__ ):
def __init__( self : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] = None ) -> Optional[Any]:
"""simple docstring"""
__lowercase = max_length
__lowercase = max_position_embeddings
@add_start_docstrings(_UpperCAmelCase )
def __call__( self : Optional[int] , _UpperCAmelCase : torch.LongTensor , _UpperCAmelCase : torch.FloatTensor , **_UpperCAmelCase : Optional[int] ) -> bool:
"""simple docstring"""
__lowercase = input_ids.shape[-1]
__lowercase = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
f"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
'exceptions, performance degradation, or nothing at all.' )
return is_done
class A__ ( lowerCAmelCase__ ):
def __init__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[str]:
"""simple docstring"""
warnings.warn(
'The class `MaxNewTokensCriteria` is deprecated. '
f"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
'with `max_length = start_length + max_new_tokens` instead.' , _UpperCAmelCase , )
__lowercase = start_length
__lowercase = max_new_tokens
__lowercase = start_length + max_new_tokens
@add_start_docstrings(_UpperCAmelCase )
def __call__( self : Dict , _UpperCAmelCase : torch.LongTensor , _UpperCAmelCase : torch.FloatTensor , **_UpperCAmelCase : Dict ) -> bool:
"""simple docstring"""
return input_ids.shape[-1] >= self.max_length
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[str] , _UpperCAmelCase : float , _UpperCAmelCase : Optional[float] = None ) -> int:
"""simple docstring"""
__lowercase = max_time
__lowercase = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(_UpperCAmelCase )
def __call__( self : Tuple , _UpperCAmelCase : torch.LongTensor , _UpperCAmelCase : torch.FloatTensor , **_UpperCAmelCase : List[Any] ) -> bool:
"""simple docstring"""
return time.time() - self.initial_timestamp > self.max_time
class A__ ( lowerCAmelCase__ ):
@add_start_docstrings(_UpperCAmelCase )
def __call__( self : Tuple , _UpperCAmelCase : torch.LongTensor , _UpperCAmelCase : torch.FloatTensor , **_UpperCAmelCase : Dict ) -> bool:
"""simple docstring"""
return any(criteria(_UpperCAmelCase , _UpperCAmelCase ) for criteria in self )
@property
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
for stopping_criterium in self:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return stopping_criterium.max_length
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return stopping_criterium.max_length
return None
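# Usage sketch (assuming the classes above correspond to the public
# transformers API: StoppingCriteriaList, MaxLengthCriteria, MaxTimeCriteria).
# Generation stops as soon as ANY criterion in the list returns True:
#
#     criteria = StoppingCriteriaList([
#         MaxLengthCriteria(max_length=50),
#         MaxTimeCriteria(max_time=2.0),  # wall-clock seconds
#     ])
#     outputs = model.generate(input_ids, stopping_criteria=criteria)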
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : StoppingCriteriaList , SCREAMING_SNAKE_CASE : int ) -> StoppingCriteriaList:
__lowercase = stopping_criteria.max_length
__lowercase = deepcopy(SCREAMING_SNAKE_CASE )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter' , SCREAMING_SNAKE_CASE )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=SCREAMING_SNAKE_CASE ) )
return new_stopping_criteria
| 702 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = ["pixel_values"]
def __init__( self : Tuple , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 2_55 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : bool = True , **_UpperCAmelCase : str , ) -> None:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = size if size is not None else {'height': 3_84, 'width': 3_84}
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase = do_convert_rgb
def a__ ( self : int , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : int , ) -> np.ndarray:
"""simple docstring"""
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
__lowercase = (size['height'], size['width'])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Any , ) -> str:
"""simple docstring"""
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : ImageInput , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Dict[str, int]] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[float] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : int , ) -> PIL.Image.Image:
"""simple docstring"""
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = resample if resample is not None else self.resample
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__lowercase = BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCAmelCase )
return encoded_outputs
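# Usage sketch (assuming this is the BLIP-style image processor; the class name
# below is illustrative). The preprocessing pipeline above resizes to 384x384,
# rescales by 1/255 and normalizes with the OPENAI_CLIP mean/std:
#
#     processor = BlipImageProcessor()
#     batch = processor(images=PIL.Image.open("photo.jpg"), return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 384, 384)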
| 688 | 0 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class A__ :
def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : int=13 , _UpperCAmelCase : str=7 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=17 , _UpperCAmelCase : List[str]=23 , _UpperCAmelCase : Dict=11 , _UpperCAmelCase : Union[str, Any]=True , ) -> Optional[int]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = act_dim
__lowercase = state_dim
__lowercase = hidden_size
__lowercase = max_length
__lowercase = is_training
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
__lowercase = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
__lowercase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowercase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowercase = ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00 )
__lowercase = random_attention_mask((self.batch_size, self.seq_length) )
__lowercase = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def a__ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , ) -> Tuple:
"""simple docstring"""
__lowercase = DecisionTransformerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            'states': states,
            'actions': actions,
            'rewards': rewards,
            'returns_to_go': returns_to_go,
            'timesteps': timesteps,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : Optional[int] = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase__ : Any = ()
lowerCAmelCase__ : Optional[int] = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
lowerCAmelCase__ : Any = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase__ : int = False
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : int = False
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : Optional[int] = False
lowerCAmelCase__ : List[Any] = False
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : str = False
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = DecisionTransformerModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = DecisionTransformerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_UpperCAmelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = [
'states',
'actions',
'rewards',
'returns_to_go',
'timesteps',
'attention_mask',
]
self.assertListEqual(arg_names[: len(_UpperCAmelCase )] , _UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = 2 # number of steps of autoregressive prediction we will perform
__lowercase = 10 # defined by the RL environment, may be normalized
__lowercase = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
__lowercase = model.to(_UpperCAmelCase )
__lowercase = model.config
torch.manual_seed(0 )
__lowercase = torch.randn(1 , 1 , config.state_dim ).to(device=_UpperCAmelCase , dtype=torch.floataa ) # env.reset()
__lowercase = torch.tensor(
[[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=_UpperCAmelCase )
__lowercase = torch.tensor(_UpperCAmelCase , device=_UpperCAmelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
__lowercase = state
__lowercase = torch.zeros(1 , 0 , config.act_dim , device=_UpperCAmelCase , dtype=torch.floataa )
__lowercase = torch.zeros(1 , 0 , device=_UpperCAmelCase , dtype=torch.floataa )
__lowercase = torch.tensor(0 , device=_UpperCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(_UpperCAmelCase ):
__lowercase = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=_UpperCAmelCase )] , dim=1 )
__lowercase = torch.cat([rewards, torch.zeros(1 , 1 , device=_UpperCAmelCase )] , dim=1 )
__lowercase = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
                state_pred, action_pred, return_pred = model(
states=_UpperCAmelCase , actions=_UpperCAmelCase , rewards=_UpperCAmelCase , returns_to_go=_UpperCAmelCase , timesteps=_UpperCAmelCase , attention_mask=_UpperCAmelCase , return_dict=_UpperCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
            state, reward, done, _ = ( # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=_UpperCAmelCase , dtype=torch.floataa ),
                1.0,
                False,
                {},
            )
__lowercase = action_pred[0, -1]
__lowercase = torch.cat([states, state] , dim=1 )
__lowercase = returns_to_go[0, -1] - reward
__lowercase = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
__lowercase = torch.cat(
[timesteps, torch.ones((1, 1) , device=_UpperCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
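# Illustrative helper (not part of the test suite): decision transformers
# condition on "returns-to-go", the suffix sums of the reward sequence, which
# is what the autoregressive loop above decrements by each observed reward.
def _returns_to_go(rewards: "torch.Tensor") -> "torch.Tensor":
    # rtg[t] = rewards[t] + rewards[t + 1] + ...  (reverse cumulative sum)
    return torch.flip(torch.cumsum(torch.flip(rewards, dims=[0]), dim=0), dims=[0])


# Example: _returns_to_go(torch.tensor([1.0, 1.0, 1.0])) -> tensor([3., 2., 1.])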
| 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
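# Note: _LazyModule defers the heavy framework imports declared above until an
# attribute is first accessed, so `from transformers.models.bert import BertModel`
# only pays the torch import cost at that point.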
| 688 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
SCREAMING_SNAKE_CASE__ = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 704 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ) -> Any:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
__lowercase = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE )
# set absolute/relative position embeddings parameter
__lowercase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WTQ":
# run_task_main.py hparams
__lowercase = 4
__lowercase = True
# hparam_utils.py hparams
__lowercase = 0.664_694
__lowercase = 0.207_951
__lowercase = 0.121_194
__lowercase = True
__lowercase = True
__lowercase = False
__lowercase = 0.0_352_513
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__lowercase = 4
__lowercase = False
# hparam_utils.py hparams
__lowercase = 36.4_519
__lowercase = 0.903_421
__lowercase = 222.088
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 0.763_141
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "TABFACT":
__lowercase = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE )
elif task == "MLM":
__lowercase = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE )
elif task == "INTERMEDIATE_PRETRAINING":
__lowercase = TapasModel(config=SCREAMING_SNAKE_CASE )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
__lowercase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
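# Example invocation (script name and paths are placeholders for illustration):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path ./tapas-wtq-converted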
| 688 | 0 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
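# Sanity-check sketch: for f(x) = x the exact arc length from 0 to 1 is
# sqrt(2) ~= 1.41421356..., and the piecewise-linear approximation is exact for
# any step count, e.g. line_length(lambda x: x, 0, 1, 10).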
if __name__ == "__main__":
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[int]:
return math.sin(10 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
SCREAMING_SNAKE_CASE__ = 10
while i <= 10_0000:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 705 |
def or_gate(input_1: int, input_2: int) -> int:
    # OR is 1 when at least one input is 1
    return int((input_1, input_2).count(1 ) != 0 )


def test_or_gate() -> None:
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
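# The tuple-count trick generalizes to other gates under the same convention,
# e.g. an AND gate (illustrative, not part of this module):
#   def and_gate(input_1: int, input_2: int) -> int:
#       return int((input_1, input_2).count(0) == 0)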
| 688 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class A__ :
def __init__( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str]=13 , _UpperCAmelCase : str=7 , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : str=True , _UpperCAmelCase : str=99 , _UpperCAmelCase : int=64 , _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : str=5 , _UpperCAmelCase : Dict=4 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : int=5_12 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Any=0.02 , _UpperCAmelCase : int=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=None , ) -> int:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = embedding_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : str ) -> Optional[int]:
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple ) -> Any:
"""simple docstring"""
__lowercase = MegatronBertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = MegatronBertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = MegatronBertForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = MegatronBertForNextSentencePrediction(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def a__ ( self : int , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = MegatronBertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def a__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = MegatronBertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = MegatronBertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = MegatronBertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = MegatronBertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[Any] = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Optional[int] = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Union[str, Any] = True
# test_resize_embeddings = False
lowerCAmelCase__ : Tuple = False
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int=False ) -> List[Any]:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = MegatronBertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_UpperCAmelCase )
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_UpperCAmelCase )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_UpperCAmelCase )
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> str:
return torch.tensor(
SCREAMING_SNAKE_CASE , dtype=torch.long , device=SCREAMING_SNAKE_CASE , )
snake_case_ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
@slow
@unittest.skip('Model is not available.' )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
__lowercase = os.path.join(os.environ['MYDIR'] , _UpperCAmelCase )
__lowercase = MegatronBertModel.from_pretrained(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.half()
__lowercase = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
for ii in range(3 ):
for jj in range(3 ):
__lowercase = output[0, ii, jj]
__lowercase = expected[3 * ii + jj]
__lowercase = 'ii={} jj={} a={} b={}'.format(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.assertTrue(math.isclose(_UpperCAmelCase , _UpperCAmelCase , rel_tol=_UpperCAmelCase , abs_tol=_UpperCAmelCase ) , msg=_UpperCAmelCase )
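# Note on the tolerance check above: math.isclose(a, b, rel_tol=t, abs_tol=t)
# passes when |a - b| <= max(t * max(|a|, |b|), t); the 1e-4 tolerance defined
# before this class accommodates the fp16 inference enabled by model.half().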
| 706 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """Hello, World!"""
SCREAMING_SNAKE_CASE__ = """en_XX"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ) -> Optional[int]:
__lowercase = Path('data_bin' )
__lowercase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(SCREAMING_SNAKE_CASE ) , bpe='sentencepiece' , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE )
__lowercase = xmod.model.encoder.sentence_encoder
__lowercase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , SCREAMING_SNAKE_CASE )
__lowercase = XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase = xmod_sent_encoder.embed_tokens.weight
__lowercase = xmod_sent_encoder.embed_positions.weight
__lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowercase = xmod_sent_encoder.layernorm_embedding.weight
__lowercase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase = model.roberta.encoder.layer[i]
__lowercase = xmod_sent_encoder.layers[i]
# self attention
__lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowercase = xmod_layer.self_attn.q_proj.weight
__lowercase = xmod_layer.self_attn.q_proj.bias
__lowercase = xmod_layer.self_attn.k_proj.weight
__lowercase = xmod_layer.self_attn.k_proj.bias
__lowercase = xmod_layer.self_attn.v_proj.weight
__lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowercase = xmod_layer.self_attn.out_proj.weight
__lowercase = xmod_layer.self_attn.out_proj.bias
__lowercase = xmod_layer.self_attn_layer_norm.weight
__lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowercase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
# output
__lowercase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
__lowercase = xmod_layer.final_layer_norm.weight
__lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowercase = xmod_layer.adapter_layer_norm.weight
__lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowercase = bert_output.adapter_modules[lang_code]
__lowercase = xmod_layer.adapter_modules[lang_code]
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowercase = xmod_sent_encoder.layer_norm.weight
__lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].dense.weight
__lowercase = xmod.model.classification_heads['mnli'].dense.bias
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight
__lowercase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowercase = xmod.model.encoder.lm_head.dense.weight
__lowercase = xmod.model.encoder.lm_head.dense.bias
__lowercase = xmod.model.encoder.lm_head.layer_norm.weight
__lowercase = xmod.model.encoder.lm_head.layer_norm.bias
__lowercase = xmod.model.encoder.lm_head.weight
__lowercase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowercase = xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
__lowercase = model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
else:
__lowercase = xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
__lowercase = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
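# Example invocation (script name and checkpoint path are placeholders):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/xmod/model.pt \
#       --pytorch_dump_folder_path ./xmod-base \
#       [--classification_head]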
| 688 | 0 |
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        ''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 707 |
from __future__ import annotations
import math
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int ) -> float:
__lowercase = u
for i in range(1 , SCREAMING_SNAKE_CASE ):
__lowercase = temp * (u - i)
return temp
def __SCREAMING_SNAKE_CASE ( ) -> None:
__lowercase = int(input('enter the numbers of values: ' ) )
__lowercase = []
for _ in range(SCREAMING_SNAKE_CASE ):
y.append([] )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
y[i].append(SCREAMING_SNAKE_CASE )
__lowercase = 0
print('enter the values of parameters in a list: ' )
__lowercase = list(map(SCREAMING_SNAKE_CASE , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(SCREAMING_SNAKE_CASE ):
__lowercase = float(input() )
__lowercase = int(input('enter the value to interpolate: ' ) )
__lowercase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , SCREAMING_SNAKE_CASE ):
for j in range(n - i ):
__lowercase = y[j + 1][i - 1] - y[j][i - 1]
__lowercase = y[0][0]
for i in range(1 , SCREAMING_SNAKE_CASE ):
summ += (ucal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) * y[0][i]) / math.factorial(SCREAMING_SNAKE_CASE )
print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
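# The loop above implements Newton's forward-difference interpolation:
#   P(x) = y[0][0] + sum_{i=1..n-1} ucal(u, i) * y[0][i] / i!
# with u = (value - x[0]) / (x[1] - x[0]) and y[0][i] the i-th forward
# difference of the tabulated values.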
| 688 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "bridgetower_vision_model"
def __init__( self : int , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : int=3 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : Tuple=2_88 , _UpperCAmelCase : Optional[int]=1 , _UpperCAmelCase : Optional[Any]=1e-0_5 , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : int=False , **_UpperCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_channels
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_factor
__lowercase = layer_norm_eps
__lowercase = stop_gradient
__lowercase = share_layernorm
__lowercase = remove_last_layer
@classmethod
def a__ ( cls : int , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : List[Any] ) -> "PretrainedConfig":
"""simple docstring"""
__lowercase , __lowercase = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
if config_dict.get('model_type' ) == "bridgetower":
            __lowercase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Any = "bridgetower_text_model"
def __init__( self : str , _UpperCAmelCase : Tuple=5_02_65 , _UpperCAmelCase : Dict=7_68 , _UpperCAmelCase : List[Any]=12 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : Optional[Any]=1 , _UpperCAmelCase : Optional[Any]=30_72 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : List[Any]=5_14 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Union[str, Any]=1e-0_5 , _UpperCAmelCase : int=1 , _UpperCAmelCase : int=0 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Optional[int]="absolute" , _UpperCAmelCase : str=True , **_UpperCAmelCase : Union[str, Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = initializer_factor
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = pad_token_id
__lowercase = bos_token_id
__lowercase = eos_token_id
@classmethod
def a__ ( cls : Union[str, Any] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Tuple ) -> "PretrainedConfig":
"""simple docstring"""
__lowercase , __lowercase = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
if config_dict.get('model_type' ) == "bridgetower":
__lowercase = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[Any] = "bridgetower"
def __init__( self : Union[str, Any] , _UpperCAmelCase : str=True , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : List[str]=7_68 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : List[Any]=1e-0_5 , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Tuple="add" , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : int=6 , _UpperCAmelCase : Any=False , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : Tuple , ) -> str:
"""simple docstring"""
__lowercase = kwargs.pop('text_config_dict' , _UpperCAmelCase )
__lowercase = kwargs.pop('vision_config_dict' , _UpperCAmelCase )
super().__init__(**_UpperCAmelCase )
__lowercase = share_cross_modal_transformer_layers
__lowercase = hidden_act
__lowercase = hidden_size
__lowercase = initializer_factor
__lowercase = layer_norm_eps
__lowercase = share_link_tower_layers
__lowercase = link_tower_type
__lowercase = num_attention_heads
__lowercase = num_hidden_layers
__lowercase = tie_word_embeddings
__lowercase = init_layernorm_from_vision_encoder
if text_config is None:
__lowercase = {}
logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' )
if vision_config is None:
__lowercase = {}
logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' )
__lowercase = BridgeTowerTextConfig(**_UpperCAmelCase )
__lowercase = BridgeTowerVisionConfig(**_UpperCAmelCase )
@classmethod
def a__ ( cls : Optional[Any] , _UpperCAmelCase : BridgeTowerTextConfig , _UpperCAmelCase : BridgeTowerVisionConfig , **_UpperCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.text_config.to_dict()
__lowercase = self.vision_config.to_dict()
__lowercase = self.__class__.model_type
return output
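# Usage sketch for the classmethod above, which upstream transformers names
# `BridgeTowerConfig.from_text_vision_configs`; the imported names below
# assume that library and are not defined under these identifiers here.
if __name__ == "__main__":
    from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig

    text_config = BridgeTowerTextConfig(hidden_size=768, num_hidden_layers=12)
    vision_config = BridgeTowerVisionConfig()
    config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
    # to_dict() serializes the sub-configs, as the method above shows
    assert config.to_dict()["text_config"]["hidden_size"] == 768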
| 708 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = F"""Input value of [number={number}] must be an integer"""
raise TypeError(SCREAMING_SNAKE_CASE )
if number < 1:
__lowercase = F"""Input value of [number={number}] must be > 0"""
raise ValueError(SCREAMING_SNAKE_CASE )
__lowercase = 1
for i in range(1 , SCREAMING_SNAKE_CASE ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
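# Cross-check sketch: the loop above applies the Catalan recurrence
# C(i) = C(i-1) * (4*i - 2) // (i + 1), so for input n it returns the
# (n-1)-th Catalan number. Verified here against the closed form
# binom(2m, m) // (m + 1):
if __name__ == "__main__":
    from math import comb

    def _catalan_recurrence(n: int) -> int:
        c = 1
        for i in range(1, n):
            c = c * (4 * i - 2) // (i + 1)
        return c

    for n in range(1, 10):
        assert _catalan_recurrence(n) == comb(2 * (n - 1), n - 1) // n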
| 688 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
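# Sketch of the deferred-import idea behind `_LazyModule` (PEP 562 module
# `__getattr__`): an attribute resolves to a real import only on first
# access. A simplified stand-in, not the actual transformers implementation;
# the toy map below resolves from the stdlib.
import importlib


def _lazy_attr_sketch(name, import_map, package=None):
    if name in import_map:
        module = importlib.import_module(import_map[name], package)
        return getattr(module, name)
    raise AttributeError(f"no lazy attribute {name!r}")


if __name__ == "__main__":
    print(_lazy_attr_sketch("dataclass", {"dataclass": "dataclasses"}))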
| 709 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
__lowercase = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
__lowercase = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE , 'func' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
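# Sketch of the `register_subcommand` protocol that `main()` above relies on:
# each command adds a subparser and stashes a factory via
# set_defaults(func=...), so `args.func(args)` builds the command object and
# `.run()` executes it. EchoCommand is a hypothetical example, not part of
# diffusers.
class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser('echo', help='print a message')
        parser.add_argument('message')
        parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message):
        self.message = message

    def run(self):
        print(self.message)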
| 688 | 0 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
SCREAMING_SNAKE_CASE__ = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
if got_ver is None or want_ver is None:
raise ValueError(
F"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
F""" reinstalling {pkg}.""" )
if not ops[op](version.parse(SCREAMING_SNAKE_CASE ) , version.parse(SCREAMING_SNAKE_CASE ) ):
raise ImportError(
F"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ) -> None:
__lowercase = F"""\n{hint}""" if hint is not None else ''
# non-versioned check
if re.match(R'^[\w_\-\d]+$' , SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase , __lowercase = requirement, None, None
else:
__lowercase = re.findall(R'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , SCREAMING_SNAKE_CASE )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'
F""" got {requirement}""" )
__lowercase , __lowercase = match[0]
__lowercase = want_full.split(',' ) # there could be multiple requirements
__lowercase = {}
for w in want_range:
__lowercase = re.findall(R'^([\s!=<>]{1,2})(.+)' , SCREAMING_SNAKE_CASE )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'
F""" but got {requirement}""" )
__lowercase , __lowercase = match[0]
__lowercase = want_ver
if op not in ops:
raise ValueError(F"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
__lowercase = '.'.join([str(SCREAMING_SNAKE_CASE ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return
# check if any version is installed
try:
__lowercase = importlib.metadata.version(SCREAMING_SNAKE_CASE )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> Dict:
__lowercase = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
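# Quick demonstration of the requirement-parsing step above: a pip-style
# requirement splits into a package name and (operator, version) pairs,
# using the same regexes as the checker (re is imported at the top of this
# file).
if __name__ == "__main__":
    pkg, want_full = re.findall(R'^([^!=<>\s]+)([\s!=<>]{1,2}.+)', 'numpy>=1.17,<2.0')[0]
    wanted = dict(re.findall(R'^([\s!=<>]{1,2})(.+)', w)[0] for w in want_full.split(','))
    assert pkg == 'numpy' and wanted == {'>=': '1.17', '<': '2.0'}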
| 710 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = ProphetNetTokenizer
lowerCAmelCase__ : str = False
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : str , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = 'UNwant\u00E9d,running'
__lowercase = 'unwanted, running'
return input_text, output_text
def a__ ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowercase = {}
for i, token in enumerate(_UpperCAmelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowercase = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
__lowercase = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a__ ( self : int ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
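# Sketch of the greedy longest-match-first rule that the WordpieceTokenizer
# assertions above exercise ('unwanted' -> ['un', '##want', '##ed']); a
# simplified stand-in, without the max-input-length handling of the real
# tokenizer.
def _wordpiece_sketch(word, vocab, unk='[UNK]'):
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:  # shrink the window until a vocab hit
            piece = word[start:end] if start == 0 else '##' + word[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # no piece matched: the whole word becomes UNK
        pieces.append(cur)
        start = end
    return pieces


assert _wordpiece_sketch('unwanted', {'un', '##want', '##ed'}) == ['un', '##want', '##ed']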
| 688 | 0 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
SCREAMING_SNAKE_CASE__ = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
SCREAMING_SNAKE_CASE__ = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
__lowercase = numpy.dtype(numpy.uintaa ).newbyteorder('>' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=SCREAMING_SNAKE_CASE )[0]
@deprecated(SCREAMING_SNAKE_CASE , 'Please use tf.data to implement this functionality.' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> int:
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE ) as bytestream:
__lowercase = _readaa(SCREAMING_SNAKE_CASE )
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
__lowercase = _readaa(SCREAMING_SNAKE_CASE )
__lowercase = _readaa(SCREAMING_SNAKE_CASE )
__lowercase = _readaa(SCREAMING_SNAKE_CASE )
__lowercase = bytestream.read(rows * cols * num_images )
__lowercase = numpy.frombuffer(SCREAMING_SNAKE_CASE , dtype=numpy.uinta )
__lowercase = data.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 1 )
return data
@deprecated(SCREAMING_SNAKE_CASE , 'Please use tf.one_hot on tensors.' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
__lowercase = labels_dense.shape[0]
__lowercase = numpy.arange(SCREAMING_SNAKE_CASE ) * num_classes
__lowercase = numpy.zeros((num_labels, num_classes) )
__lowercase = 1
return labels_one_hot
@deprecated(SCREAMING_SNAKE_CASE , 'Please use tf.data to implement this functionality.' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : int=10 ) -> Union[str, Any]:
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE ) as bytestream:
__lowercase = _readaa(SCREAMING_SNAKE_CASE )
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
__lowercase = _readaa(SCREAMING_SNAKE_CASE )
__lowercase = bytestream.read(SCREAMING_SNAKE_CASE )
__lowercase = numpy.frombuffer(SCREAMING_SNAKE_CASE , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return labels
class A__ :
@deprecated(
_UpperCAmelCase , 'Please use alternatives such as official/mnist/_DataSet.py'
' from tensorflow/models.' , )
def __init__( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : str=False , _UpperCAmelCase : Tuple=dtypes.floataa , _UpperCAmelCase : str=True , _UpperCAmelCase : Tuple=None , ) -> int:
"""simple docstring"""
__lowercase , __lowercase = random_seed.get_seed(_UpperCAmelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
__lowercase = dtypes.as_dtype(_UpperCAmelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
if fake_data:
__lowercase = 1_00_00
__lowercase = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f"""images.shape: {images.shape} labels.shape: {labels.shape}"""
__lowercase = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__lowercase = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__lowercase = images.astype(numpy.floataa )
__lowercase = numpy.multiply(_UpperCAmelCase , 1.0 / 255.0 )
__lowercase = images
__lowercase = labels
__lowercase = 0
__lowercase = 0
@property
def a__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self._images
@property
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self._labels
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self._num_examples
@property
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self._epochs_completed
def a__ ( self : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : str=True ) -> List[str]:
"""simple docstring"""
if fake_data:
__lowercase = [1] * 7_84
__lowercase = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_UpperCAmelCase )],
[fake_label for _ in range(_UpperCAmelCase )],
)
__lowercase = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__lowercase = numpy.arange(self._num_examples )
numpy.random.shuffle(_UpperCAmelCase )
__lowercase = self.images[perma]
__lowercase = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the remaining examples in this epoch
__lowercase = self._num_examples - start
__lowercase = self._images[start : self._num_examples]
__lowercase = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__lowercase = numpy.arange(self._num_examples )
numpy.random.shuffle(_UpperCAmelCase )
__lowercase = self.images[perm]
__lowercase = self.labels[perm]
# Start next epoch
__lowercase = 0
__lowercase = batch_size - rest_num_examples
__lowercase = self._index_in_epoch
__lowercase = self._images[start:end]
__lowercase = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
__lowercase = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(SCREAMING_SNAKE_CASE , 'Please write your own downloading logic.' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ) -> List[Any]:
if not gfile.Exists(SCREAMING_SNAKE_CASE ):
gfile.MakeDirs(SCREAMING_SNAKE_CASE )
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not gfile.Exists(SCREAMING_SNAKE_CASE ):
urllib.request.urlretrieve(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # noqa: S310
with gfile.GFile(SCREAMING_SNAKE_CASE ) as f:
__lowercase = f.size()
print('Successfully downloaded' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 'bytes.' )
return filepath
@deprecated(
SCREAMING_SNAKE_CASE , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int=False , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : int=dtypes.floataa , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : Optional[int]=5000 , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Any=DEFAULT_SOURCE_URL , ) -> Tuple:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=SCREAMING_SNAKE_CASE , one_hot=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE , seed=SCREAMING_SNAKE_CASE )
__lowercase = fake()
__lowercase = fake()
__lowercase = fake()
return _Datasets(train=SCREAMING_SNAKE_CASE , validation=SCREAMING_SNAKE_CASE , test=SCREAMING_SNAKE_CASE )
if not source_url: # empty string check
__lowercase = DEFAULT_SOURCE_URL
__lowercase = 'train-images-idx3-ubyte.gz'
__lowercase = 'train-labels-idx1-ubyte.gz'
__lowercase = 't10k-images-idx3-ubyte.gz'
__lowercase = 't10k-labels-idx1-ubyte.gz'
__lowercase = _maybe_download(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , source_url + train_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
__lowercase = _extract_images(SCREAMING_SNAKE_CASE )
__lowercase = _maybe_download(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , source_url + train_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
__lowercase = _extract_labels(SCREAMING_SNAKE_CASE , one_hot=SCREAMING_SNAKE_CASE )
__lowercase = _maybe_download(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , source_url + test_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
__lowercase = _extract_images(SCREAMING_SNAKE_CASE )
__lowercase = _maybe_download(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , source_url + test_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
__lowercase = _extract_labels(SCREAMING_SNAKE_CASE , one_hot=SCREAMING_SNAKE_CASE )
if not 0 <= validation_size <= len(SCREAMING_SNAKE_CASE ):
__lowercase = (
'Validation size should be between 0 and '
F"""{len(SCREAMING_SNAKE_CASE )}. Received: {validation_size}."""
)
raise ValueError(SCREAMING_SNAKE_CASE )
__lowercase = train_images[:validation_size]
__lowercase = train_labels[:validation_size]
__lowercase = train_images[validation_size:]
__lowercase = train_labels[validation_size:]
__lowercase = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
__lowercase = _DataSet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
__lowercase = _DataSet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
__lowercase = _DataSet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return _Datasets(train=SCREAMING_SNAKE_CASE , validation=SCREAMING_SNAKE_CASE , test=SCREAMING_SNAKE_CASE )
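# Hypothetical usage sketch for the loader above (named `read_data_sets`
# upstream): it returns train/validation/test splits whose `next_batch`
# yields (images, labels) arrays. The names and path below are assumptions.
#
#     datasets = read_data_sets('/tmp/mnist_data', one_hot=True, validation_size=5000)
#     for _ in range(100):
#         images, labels = datasets.train.next_batch(64)  # shapes (64, 784), (64, 10)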
| 711 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""jukebox""": 512,
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES
lowerCAmelCase__ : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=["v3", "v2", "v2"] , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Union[str, Any]="<|endoftext|>" , **_UpperCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
super().__init__(
unk_token=_UpperCAmelCase , n_genres=_UpperCAmelCase , version=_UpperCAmelCase , max_n_lyric_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = version
__lowercase = max_n_lyric_tokens
__lowercase = n_genres
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
__lowercase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2 the character vocabulary had n_vocab=80; v3 dropped '+', leaving n_vocab=79 characters.
if len(self.lyrics_encoder ) == 79:
__lowercase = oov.replace(R'\-\'' , R'\-+\'' )
__lowercase = regex.compile(_UpperCAmelCase )
__lowercase = {v: k for k, v in self.artists_encoder.items()}
__lowercase = {v: k for k, v in self.genres_encoder.items()}
__lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return {**self.artists_encoder , **self.genres_encoder , **self.lyrics_encoder}
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = [self.artists_encoder.get(_UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(_UpperCAmelCase ) ):
__lowercase = [self.genres_encoder.get(_UpperCAmelCase , 0 ) for genre in list_genres[genres]]
__lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__lowercase = [[self.lyrics_encoder.get(_UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def a__ ( self : str , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
return list(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = self.prepare_for_tokenization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = self._tokenize(_UpperCAmelCase )
return artist, genre, lyrics
def a__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__lowercase = artists[idx].lower()
__lowercase = [genres[idx].lower()]
else:
__lowercase = self._normalize(artists[idx] ) + '.v2'
__lowercase = [
self._normalize(_UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
__lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
__lowercase = {vocab[index]: index + 1 for index in range(len(_UpperCAmelCase ) )}
__lowercase = 0
__lowercase = len(_UpperCAmelCase ) + 1
__lowercase = self.vocab
__lowercase = {v: k for k, v in self.vocab.items()}
__lowercase = ''
else:
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
__lowercase = self._run_strip_accents(_UpperCAmelCase )
__lowercase = lyrics.replace('\\' , '\n' )
__lowercase = self.out_of_vocab.sub('' , _UpperCAmelCase ), [], []
return artists, genres, lyrics
def a__ ( self : Tuple , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = unicodedata.normalize('NFD' , _UpperCAmelCase )
__lowercase = []
for char in text:
__lowercase = unicodedata.category(_UpperCAmelCase )
if cat == "Mn":
continue
output.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = (
[chr(_UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
__lowercase = frozenset(_UpperCAmelCase )
__lowercase = re.compile(R'_+' )
__lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] )
__lowercase = pattern.sub('_' , _UpperCAmelCase ).strip('_' )
return text
def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return " ".join(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = False ) -> int:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = TensorType(_UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
__lowercase = tf.constant
__lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
__lowercase = torch.tensor
__lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
__lowercase = jnp.array
__lowercase = _is_jax
else:
__lowercase = np.asarray
__lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__lowercase = [inputs]
if not is_tensor(_UpperCAmelCase ):
__lowercase = as_tensor(_UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int="" , _UpperCAmelCase : Tuple="pt" ) -> BatchEncoding:
"""simple docstring"""
__lowercase = [0, 0, 0]
__lowercase = [artist] * len(self.version )
__lowercase = [genres] * len(self.version )
__lowercase , __lowercase , __lowercase = self.tokenize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase , __lowercase = self._convert_token_to_id(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = [-INFINITY] * len(full_tokens[-1] )
__lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def a__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.artists_decoder.get(_UpperCAmelCase )
__lowercase = [self.genres_decoder.get(_UpperCAmelCase ) for genre in genres_index]
__lowercase = [self.lyrics_decoder.get(_UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
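# Sketch of the framework-dispatch idea in the tensor-conversion method above:
# pick a (constructor, is-tensor check) pair per requested backend and fall
# back to numpy. A simplified stand-in, not the tokenizer's actual method
# (np is already imported at the top of this file).
def _as_backend_tensor_sketch(data, tensor_type=None):
    if tensor_type == 'pt':
        import torch  # optional dependency, imported lazily as above

        as_tensor, is_tensor = torch.tensor, torch.is_tensor
    else:
        as_tensor, is_tensor = np.asarray, lambda x: isinstance(x, np.ndarray)
    return data if is_tensor(data) else as_tensor(data)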
| 688 | 0 |
from __future__ import annotations
from random import random
class A__ :
def __init__( self : Union[str, Any] , _UpperCAmelCase : int | None = None ) -> List[str]:
"""simple docstring"""
__lowercase = value
__lowercase = random()
__lowercase = None
__lowercase = None
def __repr__( self : Optional[Any] ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ) -> str:
"""simple docstring"""
__lowercase = str(self.value ) + ' '
__lowercase = str(self.left or '' )
__lowercase = str(self.right or '' )
return value + left + right
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Node | None , SCREAMING_SNAKE_CASE : int ) -> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
__lowercase , __lowercase = split(root.left , SCREAMING_SNAKE_CASE )
return left, root
else:
__lowercase , __lowercase = split(root.right , SCREAMING_SNAKE_CASE )
return root, right
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Node | None , SCREAMING_SNAKE_CASE : Node | None ) -> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
__lowercase = merge(left.right , SCREAMING_SNAKE_CASE )
return left
else:
__lowercase = merge(SCREAMING_SNAKE_CASE , right.left )
return right
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Node | None , SCREAMING_SNAKE_CASE : int ) -> Node | None:
__lowercase = Node(SCREAMING_SNAKE_CASE )
__lowercase , __lowercase = split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return merge(merge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Node | None , SCREAMING_SNAKE_CASE : int ) -> Node | None:
__lowercase , __lowercase = split(SCREAMING_SNAKE_CASE , value - 1 )
__lowercase , __lowercase = split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return merge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Node | None ) -> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Node | None , SCREAMING_SNAKE_CASE : str ) -> Node | None:
for arg in args.split():
if arg[0] == "+":
__lowercase = insert(SCREAMING_SNAKE_CASE , int(arg[1:] ) )
elif arg[0] == "-":
__lowercase = erase(SCREAMING_SNAKE_CASE , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def __SCREAMING_SNAKE_CASE ( ) -> None:
__lowercase = None
print(
'enter numbers to create a tree, + value to insert a value into the treap, '
'- value to erase all nodes with that value. \'q\' to quit. ' )
__lowercase = input()
while args != "q":
__lowercase = interact_treap(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(SCREAMING_SNAKE_CASE )
__lowercase = input()
print('goodbye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
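# Usage sketch (upstream-style names): the helpers in this file implement the
# usual treap API of insert/erase/inorder on top of split/merge; under their
# conventional names a session would look like:
#
#     root = None
#     for value in [5, 3, 8, 1]:
#         root = insert(root, value)
#     inorder(root)         # prints: 1,3,5,8,
#     root = erase(root, 3)
#     inorder(root)         # prints: 1,5,8,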
| 712 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase) = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[Any] = True
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = AlbertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : int ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = AlbertModel.from_pretrained('albert-base-v2' )
__lowercase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
| 688 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 713 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[str] , _UpperCAmelCase : str = "▁" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[str, AddedToken] = "<unk>" , _UpperCAmelCase : Union[str, AddedToken] = "</s>" , _UpperCAmelCase : Union[str, AddedToken] = "<pad>" , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['token']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=_UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__lowercase = TemplateProcessing(
single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
__lowercase = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> str:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [files]
self._tokenizer.train(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : int , _UpperCAmelCase : Union[Iterator[str], Iterator[Iterator[str]]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> Dict:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
self._tokenizer.train_from_iterator(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['unk']['id']
__lowercase = Tokenizer.from_str(json.dumps(_UpperCAmelCase ) )
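# Usage sketch: the tokenizers library ships this implementation as
# `SentencePieceUnigramTokenizer`; under that name, training from an
# in-memory iterator and encoding would look roughly like this (the corpus
# and vocab size are assumptions):
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train_from_iterator(['hello world', 'tokenizers are fast'], vocab_size=100)
#     print(tokenizer.encode('hello world').tokens)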
| 688 | 0 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
__lowercase = tmp_path / 'cache'
__lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowercase = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
__lowercase = tmp_path / 'cache'
__lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowercase = features.copy() if features else default_expected_features
__lowercase = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowercase = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
__lowercase = tmp_path / 'cache'
__lowercase = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
__lowercase = features.copy() if features else default_expected_features
__lowercase = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowercase = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__lowercase = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
__lowercase = features.copy()
__lowercase = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowercase = tmp_path / 'cache'
__lowercase = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
__lowercase = tmp_path / 'cache'
__lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowercase = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] ) -> str:
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = jsonl_path
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = [jsonl_path]
__lowercase = tmp_path / 'cache'
__lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowercase = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int]=("train",) ) -> List[Any]:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for split in splits:
__lowercase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
__lowercase = tmp_path / 'cache'
__lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowercase = JsonDatasetReader({'train': jsonl_path} , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
__lowercase = tmp_path / 'cache'
__lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowercase = features.copy() if features else default_expected_features
__lowercase = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowercase = JsonDatasetReader({'train': jsonl_path} , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
if split:
__lowercase = {split: jsonl_path}
else:
__lowercase = 'train'
__lowercase = {'train': jsonl_path, 'test': jsonl_path}
__lowercase = tmp_path / 'cache'
__lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowercase = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
return json.load(SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> Dict:
return [json.loads(SCREAMING_SNAKE_CASE ) for line in buffer]
class A__ :
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def a__ ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase ).write()
buffer.seek(0 )
__lowercase = load_json_function(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert isinstance(exported_content[0] , _UpperCAmelCase )
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , orient=_UpperCAmelCase ).write()
buffer.seek(0 )
__lowercase = load_json(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_UpperCAmelCase , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
__lowercase = load_json_function(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert isinstance(exported_content[0] , _UpperCAmelCase )
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def a__ ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> Dict:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , orient=_UpperCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
__lowercase = load_json(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_UpperCAmelCase , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_UpperCAmelCase ) == 10
def a__ ( self : str , _UpperCAmelCase : List[str] ) -> Any:
"""simple docstring"""
with pytest.raises(_UpperCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def a__ ( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = tmp_path_factory.mktemp('data' ) / f"""test.json.{extension}"""
__lowercase = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , compression=_UpperCAmelCase ).write()
with fsspec.open(_UpperCAmelCase , 'rb' , compression='infer' ) as f:
__lowercase = f.read()
with fsspec.open(_UpperCAmelCase , 'rb' , compression='infer' ) as f:
__lowercase = f.read()
assert exported_content == original_content
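# Round-trip sketch (illustrative, written with the un-obfuscated datasets API
# names; not part of the test class above):
# ds = Dataset.from_dict({'col_1': ['a'], 'col_2': [1], 'col_3': [1.0]})
# with io.BytesIO() as buf:
#     JsonDatasetWriter(ds, buf, lines=True).write()
#     buf.seek(0)
#     rows = [json.loads(line) for line in buf]  # one dict per row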
| 714 |
import string
from math import logaa
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> int:
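    # Term frequency: strip punctuation and embedded newlines, split on spaces,
    # and count case-insensitive occurrences of the search term in the document.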
__lowercase = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
__lowercase = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> tuple[int, int]:
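    # Document frequency: treat each newline-separated line of the corpus as one
    # document and return (documents containing the term, total document count).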
__lowercase = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
__lowercase = corpus_without_punctuation.split('\n' )
__lowercase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE ))
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str]=False ) -> float:
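    # Inverse document frequency over n documents; with smoothing this computes
    # 1 + log10(n / (1 + df)) so a term found in every document still scores > 0.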
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> float:
return round(tf * idf , 3 )
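# Worked example (using the un-obfuscated helper names for clarity; all three
# helpers above share one obfuscated name, so this is illustrative only):
# term_frequency('to', 'To be, or not to be') -> 2
# document_frequency('first', 'this is the first line\nthis is the second') -> (1, 2)
# inverse_document_frequency(1, 2) -> round(log10(2 / 1), 3) == 0.301
# tf_idf(2, 0.301) -> 0.602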
| 688 | 0 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
__lowercase = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
__lowercase = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE , 'func' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
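# Example invocation once the package's console entry point is installed
# (the only subcommand registered above is the environment reporter):
# $ diffusers-cli env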
| 715 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "retribert"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=3_05_22 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : List[Any]=8 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=5_12 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=1e-1_2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Optional[int]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = share_encoders
__lowercase = projection_dim
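# Usage sketch (the class is obfuscated to A__ above; its defaults mirror the
# retribert-base-uncased checkpoint, e.g. a 128-dim projection head):
# config = A__()
# assert config.projection_dim == 128 and config.share_encoders is True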
| 688 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : Dict=30 , _UpperCAmelCase : Tuple=4_00 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Any=True , ) -> Dict:
"""simple docstring"""
__lowercase = size if size is not None else {'height': 18, 'width': 18}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = apply_ocr
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessingTester(self )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'apply_ocr' ) )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
pass
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _UpperCAmelCase )
self.assertIsInstance(encoding.boxes , _UpperCAmelCase )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowercase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__lowercase = Image.open(ds[0]['file'] ).convert('RGB' )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowercase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__lowercase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase )
self.assertListEqual(encoding.boxes , _UpperCAmelCase )
# with apply_OCR = False
__lowercase = LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
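# The _LazyModule above defers importing the torch-backed modeling code until
# one of its attributes is first accessed, so importing this package stays
# cheap when only FalconConfig is needed.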
| 688 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "umt5"
lowerCAmelCase__ : Tuple = ["past_key_values"]
def __init__( self : str , _UpperCAmelCase : int=25_01_12 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : List[str]=64 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : str=8 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : str=32 , _UpperCAmelCase : Optional[int]=1_28 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : str=1e-6 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : str="gated-gelu" , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Tuple="T5Tokenizer" , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[str]=0 , _UpperCAmelCase : int=1 , _UpperCAmelCase : List[str]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
is_encoder_decoder=_UpperCAmelCase , tokenizer_class=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_heads
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
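        # Parse e.g. 'gated-gelu': the last token selects the dense activation
        # and a leading 'gated' switches on the gated feed-forward variant.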
__lowercase = self.feed_forward_proj.split('-' )
__lowercase = act_info[-1]
__lowercase = act_info[0] == 'gated'
if len(_UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(_UpperCAmelCase ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__lowercase = 'gelu_new'
@property
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.d_model
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.num_heads
@property
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.num_layers
class A__ ( lowerCAmelCase__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def a__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__lowercase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__lowercase = 'past_encoder_sequence + sequence'
__lowercase = {0: 'batch'}
__lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return 13
@property
def a__ ( self : Dict ) -> float:
"""simple docstring"""
return 5e-4
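# Note (assumption: these mirror the copied T5 ONNX config): the 13 above is
# the default ONNX opset for export and 5e-4 the absolute tolerance used when
# validating exported outputs against the PyTorch reference.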
| 717 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = ["image_processor", "tokenizer"]
lowerCAmelCase__ : Union[str, Any] = "LayoutLMv2ImageProcessor"
lowerCAmelCase__ : Union[str, Any] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[Any] , _UpperCAmelCase : Any=None , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__lowercase = kwargs.pop('feature_extractor' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , _UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : Dict , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
__lowercase = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowercase = features['words']
__lowercase = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__lowercase = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__lowercase = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] )
__lowercase = images
return encoded_inputs
def a__ ( self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f""" {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}""" )
return images_with_overflow
def a__ ( self : Dict , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def a__ ( self : str ) -> Dict:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
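# Usage sketch (illustrative; the class is obfuscated to A__ above, and with
# apply_ocr=True the words/boxes come from Tesseract rather than the caller):
# processor = A__(image_processor=image_processor, tokenizer=tokenizer)
# encoding = processor(images=image, return_tensors='pt')
# list(encoding.keys())  # input_ids, bbox, attention_mask, image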
| 688 | 0 |
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : int = 0 ) -> list:
__lowercase = length or len(SCREAMING_SNAKE_CASE )
__lowercase = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
__lowercase , __lowercase = list_data[i + 1], list_data[i]
__lowercase = True
return list_data if not swapped else bubble_sort(SCREAMING_SNAKE_CASE , length - 1 )
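# Illustrative behavior (using the un-obfuscated name seen in the recursive
# call): each pass pins the largest remaining element at the end, then the
# function recurses on the one-element-shorter prefix.
# bubble_sort([4, 1, 3, 2]) -> [1, 2, 3, 4]
# bubble_sort([]) -> []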
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class A__ :
lowerCAmelCase__ : Optional[int] = "dummy_data"
lowerCAmelCase__ : str = "datasets"
lowerCAmelCase__ : Dict = False
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[Version, str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[List[Callable]] = None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 0
__lowercase = dataset_name
__lowercase = cache_dir
__lowercase = use_local_dummy_data
__lowercase = config
# download_callbacks take a single url as input
__lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowercase = str(_UpperCAmelCase )
# to be downloaded
__lowercase = None
__lowercase = None
@property
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self._dummy_file is None:
__lowercase = self.download_dummy_data()
return self._dummy_file
@property
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowercase = cached_path(
_UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCAmelCase , force_extract=_UpperCAmelCase )
return os.path.join(_UpperCAmelCase , self.dummy_file_name )
@property
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self._bucket_url is None:
__lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , *_UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return self.create_dummy_data_dict(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(_UpperCAmelCase , _UpperCAmelCase )
else:
return self.create_dummy_data_single(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple , *_UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
return path
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return {}
def a__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for single_url in single_urls:
download_callback(_UpperCAmelCase )
else:
__lowercase = single_urls
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) ) for x in single_urls]
else:
__lowercase = single_urls
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) )
__lowercase = value
# make sure that values are unique
if all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _UpperCAmelCase ) ) for url in data_url )
__lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowercase = [data_url[0]] * len(_UpperCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(_UpperCAmelCase )
return dummy_data_list
def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(_UpperCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def a__ ( self : int ) -> str:
"""simple docstring"""
pass
def a__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
def _iter_archive_members(_UpperCAmelCase : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__lowercase = Path(self.dummy_file ).parent
__lowercase = path.relative_to(_UpperCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_UpperCAmelCase )
__lowercase = Path(_UpperCAmelCase )
__lowercase = _iter_archive_members(_UpperCAmelCase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(_UpperCAmelCase ).as_posix(), file_path.open('rb' )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [paths]
for path in paths:
if os.path.isfile(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(_UpperCAmelCase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase )
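# Key idea (illustrative): a remote URL such as 'https://host/data/train.json?x=1'
# resolves to os.path.join(dummy_file, quote_plus('train.json?x=1')) inside the
# local dummy_data.zip, so dataset tests read fixtures instead of the network.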
| 688 | 0 |