| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 – 54.1k chars) | int64 (0 – 699) | string (111 – 35.6k chars) | int64 (0 – 699) | int64 (0 – 1) |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) SDE scheduler for score-based generative models."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
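# A minimal sketch of how this predictor-corrector scheduler is typically driven.
# `score_model` is a placeholder for any score network mapping (sample, sigma) -> score
# estimate; in diffusers the equivalent loop lives in ScoreSdeVePipeline.
def sde_ve_sample(scheduler, score_model, shape, num_inference_steps=2000, generator=None):
    sample = torch.randn(*shape, generator=generator) * scheduler.init_noise_sigma
    scheduler.set_timesteps(num_inference_steps)
    scheduler.set_sigmas(num_inference_steps)
    for i, t in enumerate(scheduler.timesteps):
        sigma_t = scheduler.sigmas[i] * torch.ones(shape[0])
        # corrector steps (annealed Langevin dynamics)
        for _ in range(scheduler.config.correct_steps):
            score = score_model(sample, sigma_t)
            sample = scheduler.step_correct(score, sample, generator=generator).prev_sample
        # predictor step (discretized reverse-time SDE)
        score = score_model(sample, sigma_t)
        output = scheduler.step_pred(score, t, sample, generator=generator)
        sample, sample_mean = output.prev_sample, output.prev_sample_mean
    return sample_mean  # returning the mean drops the final noise injection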
| 697 |
def excel_title_to_column(column_title: str) -> int:
    """
    Given the column title of an Excel sheet, return its column number
    (base-26 with digits A=1 ... Z=26).

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("Z")
    26
    >>> excel_title_to_column("AB")
    28
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 697 | 1 |
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by mapping every pixel through a linear contrast curve."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Fundamental transformation applied to every pixel value."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
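# Sanity check on the contrast factor above (a standard linear contrast formula with
# level in [-255, 255]): level 0 is the identity mapping, and larger levels steepen the
# slope around the midpoint 128, pushing pixels toward 0 or 255.
for level in (0, 128, 255):
    factor = (259 * (level + 255)) / (255 * (259 - level))
    print(level, round(factor, 3))  # 0 -> 1.0, 128 -> 2.97, 255 -> 129.5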
| 697 |
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    """Constructs a TVLT audio feature extractor that turns raw speech into padded log-mel spectrogram patches."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)

        return encoded_inputs
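# Hypothetical usage: one second of mono audio at the extractor's 44.1 kHz default.
# The shapes follow from the definitions above (feature_size=128, patch_size=[16, 16]).
feature_extractor = TvltFeatureExtractor()
speech = np.random.randn(44100).astype(np.float32)
inputs = feature_extractor(speech, sampling_rate=44100, return_tensors="np")
print(inputs["audio_values"].shape)  # (1, 1, max_time_len, 128)
print(inputs["audio_mask"].shape)    # (1, max_patch_len)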
| 697 | 1 |
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Backtracking search that collects every valid N-queens board in `boards`."""
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # First we check that the current board (possible_board) does not already contain
        # the same value, because if it does there is a collision in the vertical.
        # Then we apply the two diagonal formulas:
        #
        # 45º:  y - x = b  or  row - col = b
        # 135º: y + x = b  or  row + col = b
        #
        # and verify that their results do not already exist in
        # diagonal_right_collisions and diagonal_left_collisions respectively.
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Solve the N-queens puzzle and print every board that was found."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
| 697 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Finds the Jaccard similarity coefficient between two sets (or two lists/tuples,
    preserving duplicates). With `alternative_union=True` the denominator is
    |A| + |B| instead of |A union B|.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
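# Sample values, computed by hand from the definitions above: the two sets share
# {"c", "d", "e"} (3 elements) out of 8 distinct elements in total.
sets = ({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"})
print(jaccard_similarity(*sets))                          # 3 / 8  = 0.375
print(jaccard_similarity(*sets, alternative_union=True))  # 3 / 11 = 0.2727...
print(jaccard_similarity(list(sets[0]), list(sets[1])))   # list/tuple path, also 0.375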
| 697 | 1 |
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """
    Classic minimax over a complete binary tree whose leaves hold `scores`.

    >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    >>> height = math.log(len(scores), 2)
    >>> minimax(0, 0, True, scores, height)
    65
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 697 |
def fizz_buzz(number: int, iterations: int) -> str:
    """
    Plays FizzBuzz from `number` up to and including `iterations`.

    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 697 | 1 |
from __future__ import annotations

import math


# for calculating u value
def ucal(u: float, p: int) -> float:
    """
    >>> ucal(1, 2)
    0
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
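# Non-interactive sketch of the same Newton forward-difference interpolation, so the math
# can be checked without stdin. Sampling f(x) = x**2 at x = 0..3 reproduces f(1.5) exactly,
# since the third forward difference of a quadratic is zero.
def newton_forward(x: list[float], y0: list[float], value: float) -> float:
    n = len(x)
    y = [[0.0] * n for _ in range(n)]
    for i, v in enumerate(y0):
        y[i][0] = v
    for i in range(1, n):  # build the forward difference table column by column
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    return summ


print(newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 1.5))  # 2.25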
| 697 |
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
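# In the datasets library this reader backs Dataset.from_generator, which is the
# public entry point; a minimal usage sketch:
from datasets import Dataset


def gen():
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}


ds = Dataset.from_generator(gen)
print(ds[0])  # {'id': 0, 'text': 'example 0'}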
| 697 | 1 |
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 697 |
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply `iteration_step` the number of times given by `steps`."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every line segment with the four segments of the Koch construction."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the snowflake with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
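# Each iteration replaces every segment with four, so the closed polyline above grows from
# 3 segments to 3 * 4**k after k steps; with the 5 iterations used in __main__ that is
# 3 * 4**5 = 3072 segments, i.e. 3073 stored points (the first point is repeated at the end).
assert len(iterate(INITIAL_VECTORS, 1)) == 3 * 4 + 1
assert len(iterate(INITIAL_VECTORS, 5)) == 3 * 4**5 + 1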
| 697 | 1 |
"""Convert original X-CLIP checkpoints into the Hugging Face X-CLIP format."""

import argparse

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    CLIPTokenizer,
    CLIPTokenizerFast,
    VideoMAEImageProcessor,
    XCLIPConfig,
    XCLIPModel,
    XCLIPProcessor,
    XCLIPTextConfig,
    XCLIPVisionConfig,
)


def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original checkpoint's weights into the Hugging Face X-CLIP structure."""
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }

    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)

    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__lowerCAmelCase =parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 697 |
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode blocks:
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False


def is_chinese(word: str):
    """Return 1 if every character in `word` is a CJK character, else 0."""
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    """Collect all multi-character Chinese words from a token list."""
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """Prefix '##' to BERT tokens that continue a whole word from `chinese_word_set`."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
| 697 | 1 |
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )


@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 697 |
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" RetriBERT tokenizer backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 697 | 1 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
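# Hypothetical usage: building a randomly initialized LeViT-128S-sized model from this
# config (LevitModel is assumed to be importable alongside the config in transformers).
from transformers import LevitConfig, LevitModel

config = LevitConfig(hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12])
model = LevitModel(config)
print(config.down_ops)  # [['Subsample', 16, 8, 4, 2, 2], ['Subsample', 16, 16, 4, 2, 2]]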
| 697 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 697 | 1 |
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield the Fibonacci numbers 1, 2, 3, 5, 8, ... one at a time."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first term in the Fibonacci sequence to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
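# Quick check grounded in the code above: counting F(1) = F(2) = 1, the first Fibonacci
# term with three digits is F(12) = 144.
assert solution(3) == 12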
| 697 |
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    """Solve the "rat in a maze" problem and print the path matrix if one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursively mark (i, j) as part of the path and explore the four neighbours."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
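# Example run with a 5x5 grid (0 = open cell, 1 = wall); prints the 0/1 path matrix
# from the top-left corner (0, 0) to the bottom-right corner (4, 4).
maze = [
    [0, 1, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 0, 1, 0, 0],
    [1, 0, 0, 1, 0],
]
solve_maze(maze)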
| 697 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCAmelCase =16
__lowerCAmelCase =32
def a ( _UpperCAmelCase , _UpperCAmelCase = 1_6 ) -> Union[str, Any]:
"""simple docstring"""
a_ = AutoTokenizer.from_pretrained('bert-base-cased' )
a_ = load_dataset('glue' , 'mrpc' )
def tokenize_function(_UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
a_ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
a_ = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a_ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
a_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
a_ = 1_6
elif accelerator.mixed_precision != "no":
a_ = 8
else:
a_ = None
return tokenizer.pad(
_UpperCAmelCase , padding='longest' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='pt' , )
# Instantiate dataloaders.
a_ = DataLoader(
tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
a_ = DataLoader(
tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__lowerCAmelCase =mocked_dataloaders # noqa: F811
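# Back-of-envelope helper (a sketch, not part of the original script): under
# gradient accumulation the optimizer steps once per
# per_device_batch * num_processes * accumulation_steps examples.
def _effective_batch_size(per_device: int, num_processes: int, accum_steps: int) -> int:
    return per_device * num_processes * accum_steps
# e.g. batch size 16 on 2 GPUs with --gradient_accumulation_steps 4 -> 128
assert _effective_batch_size(16, 2, 4) == 128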
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
"""simple docstring"""
if os.environ.get('TESTING_MOCKED_DATALOADERS' , _UpperCAmelCase ) == "1":
a_ = 2
# New Code #
a_ = int(args.gradient_accumulation_steps )
# Initialize accelerator
a_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_UpperCAmelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a_ = config['lr']
a_ = int(config['num_epochs'] )
a_ = int(config['seed'] )
a_ = int(config['batch_size'] )
a_ = evaluate.load('glue' , 'mrpc' )
set_seed(_UpperCAmelCase )
a_ , a_ = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a_ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a_ = model.to(accelerator.device )
# Instantiate optimizer
a_ = AdamW(params=model.parameters() , lr=_UpperCAmelCase )
# Instantiate scheduler
a_ = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=1_0_0 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a_ , a_ , a_ , a_ , a_ = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_UpperCAmelCase ):
a_ = model(**_UpperCAmelCase )
a_ = output.loss
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
a_ = model(**_UpperCAmelCase )
a_ = outputs.logits.argmax(dim=-1 )
a_ , a_ = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
a_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _UpperCAmelCase )
def a ( ) -> Union[str, Any]:
"""simple docstring"""
a_ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
'--gradient_accumulation_steps' , type=_UpperCAmelCase , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
a_ = parser.parse_args()
a_ = {'lr': 2E-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 697 |
'''simple docstring'''
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of each unit relative to the meter
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert `value` between metric length units.

    >>> length_conversion(4, 'meter', 'kilometer')
    0.004
    """
    from_sanitized = from_type.lower().strip('s')
    to_sanitized = to_type.lower().strip('s')
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    # A positive exponent scales up (e.g. km -> m), a negative one scales down.
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
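    # Quick checks (a sketch using length_conversion above): 1 km = 1000 m
    # and 4 m = 0.004 km.
    assert length_conversion(1, 'kilometer', 'meter') == 1000
    assert length_conversion(4, 'meter', 'kilometer') == 0.004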
| 697 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
__lowerCAmelCase =logging.getLogger(__name__)
@dataclass
class _snake_case :
"""simple docstring"""
_UpperCamelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_UpperCamelCase = field(
default=snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCamelCase = field(
default=snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCamelCase = field(
default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_UpperCamelCase = field(
default=snake_case , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
_UpperCamelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class _snake_case :
"""simple docstring"""
_UpperCamelCase = field(default=snake_case , metadata={"help": "The input training data file (a text file)."} )
_UpperCamelCase = field(
default=snake_case , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
_UpperCamelCase = field(
default=snake_case , metadata={"help": "Overwrite the cached training and evaluation sets"} )
_UpperCamelCase = field(
default=snake_case , metadata={"help": "The number of processes to use for the preprocessing."} , )
_UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
_UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
if self.train_file is not None:
a_ = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
a_ = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _snake_case :
"""simple docstring"""
_UpperCamelCase = 42
_UpperCamelCase = True
_UpperCamelCase = None
_UpperCamelCase = None
def __call__( self , UpperCAmelCase__ ) -> Optional[Any]:
a_ = 'label' if 'label' in features[0].keys() else 'labels'
a_ = [feature.pop(UpperCAmelCase__ ) for feature in features]
a_ = len(UpperCAmelCase__ )
a_ = len(features[0]['input_ids'] )
a_ = [
[{k: v[i] for k, v in feature.items()} for i in range(UpperCAmelCase__ )] for feature in features
]
a_ = list(chain(*UpperCAmelCase__ ) )
a_ = self.tokenizer.pad(
UpperCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
a_ = {k: v.view(UpperCAmelCase__ , UpperCAmelCase__ , -1 ) for k, v in batch.items()}
# Add back labels
a_ = torch.tensor(UpperCAmelCase__ , dtype=torch.intaa )
return batch
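# Shape sketch for the multiple-choice collator above (illustrative only): a
# batch of B examples with 4 candidate endings is flattened to 4*B sequences
# before tokenizer.pad, then reshaped back to (B, 4, seq_len) via tensor.view.
_B, _C = 2, 4
_flat = list(range(_B * _C))
_unflat = [_flat[i * _C:(i + 1) * _C] for i in range(_B)]
assert _unflat == [[0, 1, 2, 3], [4, 5, 6, 7]]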
def a ( ) -> Tuple:
"""simple docstring"""
a_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a_ , a_ , a_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a_ , a_ , a_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , _UpperCAmelCase , _UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
a_ = training_args.get_process_log_level()
logger.setLevel(_UpperCAmelCase )
datasets.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
a_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
a_ = {}
if data_args.train_file is not None:
a_ = data_args.train_file
if data_args.validation_file is not None:
a_ = data_args.validation_file
a_ = data_args.train_file.split('.' )[-1]
a_ = load_dataset(
_UpperCAmelCase , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
a_ = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
a_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
a_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
a_ = [F'''ending{i}''' for i in range(4 )]
a_ = 'sent1'
a_ = 'sent2'
if data_args.max_seq_length is None:
a_ = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
a_ = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
a_ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_UpperCAmelCase ):
a_ = [[context] * 4 for context in examples[context_name]]
a_ = examples[question_header_name]
a_ = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(_UpperCAmelCase )
]
# Flatten out
a_ = list(chain(*_UpperCAmelCase ) )
a_ = list(chain(*_UpperCAmelCase ) )
# Tokenize
a_ = tokenizer(
_UpperCAmelCase , _UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
a_ = raw_datasets['train']
if data_args.max_train_samples is not None:
a_ = min(len(_UpperCAmelCase ) , data_args.max_train_samples )
a_ = train_dataset.select(range(_UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
a_ = train_dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
a_ = raw_datasets['validation']
if data_args.max_eval_samples is not None:
a_ = min(len(_UpperCAmelCase ) , data_args.max_eval_samples )
a_ = eval_dataset.select(range(_UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
a_ = eval_dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
a_ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_UpperCAmelCase ):
a_ , a_ = eval_predictions
a_ = np.argmax(_UpperCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
a_ = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , compute_metrics=_UpperCAmelCase , )
# Training
if training_args.do_train:
a_ = None
if training_args.resume_from_checkpoint is not None:
a_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a_ = last_checkpoint
a_ = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
a_ = train_result.metrics
a_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase )
)
a_ = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.log_metrics('train' , _UpperCAmelCase )
trainer.save_metrics('train' , _UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
a_ = trainer.evaluate()
a_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase )
a_ = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.log_metrics('eval' , _UpperCAmelCase )
trainer.save_metrics('eval' , _UpperCAmelCase )
a_ = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCAmelCase )
else:
trainer.create_model_card(**_UpperCAmelCase )
def a ( _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 697 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase ="naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
a_ = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
a_ = self.processor.tokenajson(UpperCAmelCase__ )
self.assertDictEqual(UpperCAmelCase__ , UpperCAmelCase__ )
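# Illustration (a hedged sketch, not the library's implementation): Donut
# serializes each field as <s_key>value</s_key> and separates list items with
# <sep/>. A minimal parser for the flat, non-nested case looks like this:
import re
def _flat_token2json(seq):
    return dict(re.findall(r'<s_([^>]+)>(.*?)</s_\1>', seq))
assert _flat_token2json('<s_age>99</s_age><s_city>Atlanta</s_city>') == {'age': '99', 'city': 'Atlanta'}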
| 697 | 1 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase =get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = AlbertTokenizer
_UpperCamelCase = AlbertTokenizerFast
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = True
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
a_ = AlbertTokenizer(UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[int]:
a_ = 'this is a test'
a_ = 'this is a test'
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = '<pad>'
a_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
self.assertEqual(len(UpperCAmelCase__ ) , 3_0000 )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = 'I was born in 92000, and this is falsé.'
a_ = tokenizer.tokenize(UpperCAmelCase__ )
a_ = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
a_ = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = self.get_rust_tokenizer()
a_ = tokenizer.encode(UpperCAmelCase__ )
a_ = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = AlbertTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
a_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase__ , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [48, 25, 21, 1289] )
a_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase__ , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
a_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
a_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = AlbertTokenizer(UpperCAmelCase__ )
a_ = tokenizer.encode('sequence builders' )
a_ = tokenizer.encode('multi-sequence build' )
a_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
a_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# fmt: off
a_ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
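# Note on the '▁' pieces asserted throughout this test: SentencePiece encodes
# a preceding space as U+2581, so detokenization is concatenate-and-replace.
_pieces = ['▁this', '▁is', '▁a', '▁test']
assert ''.join(_pieces).replace('\u2581', ' ').strip() == 'this is a test'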
| 697 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="resnet50" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , ) -> Optional[Any]:
a_ = parent
a_ = out_indices if out_indices is not None else [4]
a_ = stage_names
a_ = out_features
a_ = backbone
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = use_pretrained_backbone
a_ = is_training
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = self.get_config()
return config, pixel_values
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = TimmBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
a_ = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ = config_and_inputs
a_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (TimmBackbone,) if is_torch_available() else ()
_UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = TimmBackboneModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = 'resnet18'
a_ = 'microsoft/resnet-18'
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
a_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
a_ = self.all_model_classes[0]
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = model(**UpperCAmelCase__ )
a_ = outputs[0][-1]
# Encoder-/Decoder-only models
a_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = None
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = False
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
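# The out_indices convention exercised above, in miniature: timm uses (-1,)
# to mean "last stage" while transformers stores [len(stage_names) - 1]; both
# resolve to the same stage.
_stage_names = ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
assert _stage_names[-1] == _stage_names[len(_stage_names) - 1]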
| 697 | 1 |
'''simple docstring'''
import os
def solution() -> int:
    """Sum each name's alphabetical value times its 1-based rank in the sorted list (Project Euler 22)."""
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
        names = names.replace('"', '').split(',')
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64  # 'A' -> 1, ..., 'Z' -> 26
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
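    # Worked example from the problem statement: COLIN scores
    # 3 + 15 + 12 + 9 + 14 = 53, so as the 938th name it would contribute
    # 938 * 53 = 49714 to the total.
    assert sum(ord(letter) - 64 for letter in 'COLIN') == 53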
| 697 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]:
a_ = size if size is not None else {'height': 18, 'width': 18}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = apply_ocr
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = LayoutLMvaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
# with apply_OCR = True
a_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
a_ = Image.open(ds[0]['file'] ).convert('RGB' )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
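# Hedged sketch of the box convention behind the assertions above: the OCR
# boxes are normalized to a 0-1000 coordinate space relative to the image
# size, along the lines of the helper below (illustrative, not the exact
# library code).
def _normalize_box(box, width, height):
    x0, y0, x1, y1 = box
    return [int(1000 * x0 / width), int(1000 * y0 / height), int(1000 * x1 / width), int(1000 * y1 / height)]
assert _normalize_box([200, 50, 400, 100], 2000, 1000) == [100, 50, 200, 100]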
| 697 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = None
_UpperCamelCase = BloomTokenizerFast
_UpperCamelCase = BloomTokenizerFast
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = "tokenizer_file"
_UpperCamelCase = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
super().setUp()
a_ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase__ ) -> Any:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = self.get_rust_tokenizer()
a_ = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
a_ = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
a_ = tokenizer.batch_encode_plus(UpperCAmelCase__ )['input_ids']
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__=6 ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
a_ = 'This is a simple input'
a_ = ['This is a simple input 1', 'This is a simple input 2']
a_ = ('This is a simple input', 'This is a pair')
a_ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
a_ = None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='max_length' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='max_length' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='max_length' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='max_length' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='max_length' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='max_length' , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = self.get_rust_tokenizer()
a_ = load_dataset('xnli' , 'all_languages' , split='test' , streaming=UpperCAmelCase__ )
a_ = next(iter(UpperCAmelCase__ ) )['premise'] # pick up one data
a_ = list(sample_data.values() )
a_ = list(map(tokenizer.encode , UpperCAmelCase__ ) )
a_ = [tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) for x in output_tokens]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
        # impose any sequence-length constraint. The parent-class test would fail since it relies on
        # the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
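# Schematic of the round-trip property the xnli test above relies on: for a
# lossless tokenizer, decode(encode(s)) == s. Identity stand-ins make the
# property concrete without downloading the real tokenizer.
_encode = list
_decode = ''.join
assert all(_decode(_encode(s)) == s for s in ['héllo', 'xnli premise'])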
| 697 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Trial-division primality test with the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    """Return the `nth` prime number (Project Euler problem 7)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]
if __name__ == "__main__":
print(f'''{solution() = }''')
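    # Quick checks (a sketch using the helpers above): 2 and 97 are prime,
    # 1 is not, and the 6th prime is 13.
    assert is_prime(2) and is_prime(97) and not is_prime(1)
    assert solution(6) == 13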
| 697 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__lowerCAmelCase =None
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ="▁"
__lowerCAmelCase ={"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase ={
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
__lowerCAmelCase ={
"google/pegasus-xsum": 512,
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = PegasusTokenizer
_UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="<pad>" , UpperCAmelCase__="</s>" , UpperCAmelCase__="<unk>" , UpperCAmelCase__="<mask_2>" , UpperCAmelCase__="<mask_1>" , UpperCAmelCase__=None , UpperCAmelCase__=103 , **UpperCAmelCase__ , ) -> Tuple:
a_ = offset
if additional_special_tokens is not None:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise TypeError(
F'''additional_special_tokens should be of type {type(UpperCAmelCase__ )}, but is'''
F''' {type(UpperCAmelCase__ )}''' )
a_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill the remaining slots with <unk_...> tokens in case not all additional special tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(UpperCAmelCase__ ) , self.offset - 1 )
]
if len(set(UpperCAmelCase__ ) ) != len(UpperCAmelCase__ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
a_ = additional_special_tokens_extended
else:
a_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , mask_token_sent=UpperCAmelCase__ , offset=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = vocab_file
a_ = False if not self.vocab_file else True
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Dict:
a_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
F''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(UpperCAmelCase__ )
elif token_ids_a is None:
return self._special_token_mask(UpperCAmelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a_ = os.path.join(
UpperCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ):
copyfile(self.vocab_file , UpperCAmelCase__ )
return (out_vocab_file,)
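# --- Illustrative sketch: with the default offset of 103 and no user-supplied
# tokens, the constructor above ends up with the sentence mask plus
# <unk_2> ... <unk_102> as additional special tokens (names below are ours):
_offset = 103
_extra = ["<mask_1>"] + [f"<unk_{i}>" for i in range(2, _offset)]
assert len(_extra) == 102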
| 697 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = 10
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = [1, 2, 3, 4]
a_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = ''
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
a_ , a_ = process_story(UpperCAmelCase__ )
a_ = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = ['It was the best of times.']
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = torch.tensor([1, 2, 3, 4] )
a_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = 101
a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ )
np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
| 697 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "lilt"
def __init__( self , UpperCAmelCase__=3_0522 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=0 , UpperCAmelCase__="absolute" , UpperCAmelCase__=None , UpperCAmelCase__=4 , UpperCAmelCase__=1024 , **UpperCAmelCase__ , ) -> Optional[Any]:
super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = classifier_dropout
a_ = channel_shrink_ratio
a_ = max_ad_position_embeddings
| 697 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class _snake_case ( unittest.TestCase , snake_case ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = load_tool('text-question-answering' )
self.tool.setup()
a_ = load_tool('text-question-answering' , remote=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.remote_tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = self.tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = self.remote_tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
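# --- Illustrative usage sketch (assumes the agents extras of `transformers`
# are installed; kept as comments to avoid loading a model):
# from transformers import load_tool
# qa = load_tool("text-question-answering")
# qa(TEXT, "What did Hugging Face do in April 2021?")
# # -> "launched the BigScience Research Workshop"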
| 697 | 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 697 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =[
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 | 1 |
'''simple docstring'''
def a ( _UpperCAmelCase ) -> bool:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('Input series is not valid; a valid series looks like [2, 4, 6]' )
if len(_UpperCAmelCase ) == 0:
raise ValueError('Input list must be a non-empty list' )
if len(_UpperCAmelCase ) == 1:
return True
a_ = series[1] - series[0]
for index in range(len(_UpperCAmelCase ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def a ( _UpperCAmelCase ) -> float:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('Input series is not valid; a valid series looks like [2, 4, 6]' )
if len(_UpperCAmelCase ) == 0:
raise ValueError('Input list must be a non-empty list' )
a_ = 0
for val in series:
answer += val
return answer / len(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "vit"
def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=224 , UpperCAmelCase__=16 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=16 , **UpperCAmelCase__ , ) -> Dict:
super().__init__(**UpperCAmelCase__ )
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = qkv_bias
a_ = encoder_stride
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> float:
return 1e-4
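# --- Illustrative sketch: the defaults above describe a ViT-Base-style model
# (12 layers, 12 heads, hidden size 768, 16x16 patches on 224px inputs).
# Kept as comments to avoid importing the full library:
# from transformers import ViTConfig
# cfg = ViTConfig()
# assert (cfg.num_hidden_layers, cfg.num_attention_heads, cfg.hidden_size) == (12, 12, 768)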
| 697 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "layoutlmv3"
def __init__( self , UpperCAmelCase__=5_0265 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-5 , UpperCAmelCase__=1 , UpperCAmelCase__=0 , UpperCAmelCase__=2 , UpperCAmelCase__=1024 , UpperCAmelCase__=128 , UpperCAmelCase__=128 , UpperCAmelCase__=True , UpperCAmelCase__=32 , UpperCAmelCase__=128 , UpperCAmelCase__=64 , UpperCAmelCase__=256 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=224 , UpperCAmelCase__=3 , UpperCAmelCase__=16 , UpperCAmelCase__=None , **UpperCAmelCase__ , ) -> Dict:
super().__init__(
vocab_size=UpperCAmelCase__ , hidden_size=UpperCAmelCase__ , num_hidden_layers=UpperCAmelCase__ , num_attention_heads=UpperCAmelCase__ , intermediate_size=UpperCAmelCase__ , hidden_act=UpperCAmelCase__ , hidden_dropout_prob=UpperCAmelCase__ , attention_probs_dropout_prob=UpperCAmelCase__ , max_position_embeddings=UpperCAmelCase__ , type_vocab_size=UpperCAmelCase__ , initializer_range=UpperCAmelCase__ , layer_norm_eps=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = max_ad_position_embeddings
a_ = coordinate_size
a_ = shape_size
a_ = has_relative_attention_bias
a_ = rel_pos_bins
a_ = max_rel_pos
a_ = has_spatial_attention_bias
a_ = rel_ad_pos_bins
a_ = max_rel_ad_pos
a_ = text_embed
a_ = visual_embed
a_ = input_size
a_ = num_channels
a_ = patch_size
a_ = classifier_dropout
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = version.parse("1.12" )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> float:
return 1e-5
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return 12
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = -1 , UpperCAmelCase__ = -1 , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = 3 , UpperCAmelCase__ = 40 , UpperCAmelCase__ = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , UpperCAmelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a_ = compute_effective_axis_dimension(
UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a_ = processor.tokenizer.num_special_tokens_to_add(UpperCAmelCase__ )
a_ = compute_effective_axis_dimension(
UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase__ )
# Generate dummy inputs according to compute batch and sequence
a_ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
a_ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
a_ = self._generate_dummy_images(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
a_ = dict(
processor(
UpperCAmelCase__ , text=UpperCAmelCase__ , boxes=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , ) )
return inputs
| 697 |
'''simple docstring'''
def a ( _UpperCAmelCase = 5_0 ) -> int:
"""simple docstring"""
a_ = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
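# --- Sanity check from the problem statement this dynamic programme solves:
# a row measuring seven units admits exactly seventeen block arrangements.
# (Kept as a comment; `solution` is the intended name of the function above.)
# assert solution(7) == 17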
| 697 | 1 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = 1
a_ = 3
a_ = (32, 32)
a_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase__ )
return image
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
torch.manual_seed(0 )
a_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
torch.manual_seed(0 )
a_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
torch.manual_seed(0 )
a_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCAmelCase__ )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
def extract(*UpperCAmelCase__ , **UpperCAmelCase__ ):
class _snake_case :
"""simple docstring"""
def __init__( self ) -> Any:
a_ = torch.ones([0] )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> List[Any]:
self.pixel_values.to(UpperCAmelCase__ )
return self
return Out()
return extract
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
a_ = self.dummy_cond_unet
a_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
a_ = self.dummy_vae
a_ = self.dummy_text_encoder
a_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# assemble the pipeline (the DDIM scheduler above has no PRK steps to skip)
a_ = StableDiffusionPipeline(
unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , feature_extractor=self.dummy_extractor , )
a_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = 'A painting of a squirrel eating a burger'
a_ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
a_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
a_ = output.images
a_ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
a_ = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase__ , )[0]
a_ = image[0, -3:, -3:, -1]
a_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a_ = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
a_ = self.dummy_cond_unet
a_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase__ )
a_ = self.dummy_vae
a_ = self.dummy_text_encoder
a_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure the PNDM scheduler skips the PRK steps
a_ = StableDiffusionPipeline(
unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , feature_extractor=self.dummy_extractor , )
a_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = 'A painting of a squirrel eating a burger'
a_ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
a_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
a_ = output.images
a_ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
a_ = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase__ , )[0]
a_ = image[0, -3:, -3:, -1]
a_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a_ = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert isinstance(pipe.scheduler , UpperCAmelCase__ )
assert pipe.safety_checker is None
a_ = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase__ )
a_ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
a_ = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.dummy_cond_unet
a_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase__ )
a_ = self.dummy_vae
a_ = self.dummy_text_encoder
a_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
a_ = unet.half()
a_ = vae.half()
a_ = bert.half()
# make sure the PNDM scheduler skips the PRK steps
a_ = StableDiffusionPipeline(
unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , feature_extractor=self.dummy_extractor , )
a_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = 'A painting of a squirrel eating a burger'
a_ = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase__ )
a_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
a_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
a_ = 40_0366_0346
a_ = 7
# without safety guidance (sld_guidance_scale = 0)
a_ = torch.manual_seed(UpperCAmelCase__ )
a_ = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
a_ = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# with safety guidance (strong configuration)
a_ = torch.manual_seed(UpperCAmelCase__ )
a_ = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
a_ = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase__ )
a_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
a_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = 'padme amidala taking a bath artwork, safe for work, no nudity'
a_ = 27_3497_1755
a_ = 7
a_ = torch.manual_seed(UpperCAmelCase__ )
a_ = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
a_ = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
a_ = torch.manual_seed(UpperCAmelCase__ )
a_ = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
a_ = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
a_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
a_ = 10_4435_5234
a_ = 12
a_ = torch.manual_seed(UpperCAmelCase__ )
a_ = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
a_ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
a_ = torch.manual_seed(UpperCAmelCase__ )
a_ = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
a_ = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 697 |
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
"""simple docstring"""
return int(input_a == input_a == 0 )
def a ( ) -> None:
"""simple docstring"""
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
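# --- Readable reference for the gate above (helper name is ours): NOR is
# true only when both inputs are 0.
def _nor(input_a: int, input_b: int) -> int:
    return int(input_a == input_b == 0)


assert [_nor(0, 0), _nor(0, 1), _nor(1, 0), _nor(1, 1)] == [1, 0, 0, 0]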
| 697 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = tempfile.mkdtemp()
# fmt: off
a_ = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
a_ = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
a_ = os.path.join(self.tmpdirname , UpperCAmelCase__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase__ ) -> int:
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase__ ) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a_ = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = self.get_tokenizer()
a_ = self.get_image_processor()
a_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
a_ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
a_ = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
a_ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
a_ = self.prepare_image_inputs()
a_ = image_processor(UpperCAmelCase__ , return_tensors='np' )
a_ = processor(images=UpperCAmelCase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
a_ = 'lower newer'
a_ = processor(text=UpperCAmelCase__ )
a_ = tokenizer(UpperCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
a_ = 'lower newer'
a_ = self.prepare_image_inputs()
a_ = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(UpperCAmelCase__ ):
processor()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
a_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a_ = processor.batch_decode(UpperCAmelCase__ )
a_ = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
a_ = 'lower newer'
a_ = self.prepare_image_inputs()
a_ = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 697 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class _snake_case ( snake_case ):
"""simple docstring"""
@staticmethod
def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> str:
a_ = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=UpperCAmelCase__ , help='Name of the model to download' )
download_parser.set_defaults(func=UpperCAmelCase__ )
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = model
a_ = cache
a_ = force
a_ = trust_remote_code
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 697 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class _snake_case ( unittest.TestCase , snake_case ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = load_tool('text-question-answering' )
self.tool.setup()
a_ = load_tool('text-question-answering' , remote=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.remote_tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = self.tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = self.remote_tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
| 697 |
'''simple docstring'''
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
assert column_title.isupper()
a_ = 0
a_ = len(_UpperCAmelCase ) - 1
a_ = 0
while index >= 0:
a_ = (ord(column_title[index] ) - 6_4) * pow(2_6 , power )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
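# --- Readable base-26 version of the conversion above (helper name is ours):
def _excel_title_to_column(title: str) -> int:
    answer = 0
    for ch in title:
        answer = answer * 26 + (ord(ch) - 64)  # 'A' -> 1, ..., 'Z' -> 26
    return answer


assert _excel_title_to_column("AB") == 28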
| 697 | 1 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
a_ = np.full((len(_UpperCAmelCase ), sequence_length, 2) , _UpperCAmelCase )
else:
a_ = np.full((len(_UpperCAmelCase ), sequence_length) , _UpperCAmelCase )
for i, tensor in enumerate(_UpperCAmelCase ):
if padding_side == "right":
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
a_ = tensor[:sequence_length]
else:
a_ = tensor[:sequence_length]
else:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
a_ = tensor[:sequence_length]
else:
a_ = tensor[:sequence_length]
return out_tensor.tolist()
def a ( _UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
a_ = ord(_UpperCAmelCase )
if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
return True
a_ = unicodedata.category(_UpperCAmelCase )
if cat.startswith('P' ):
return True
return False
@dataclass
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = 42
_UpperCamelCase = True
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = -100
_UpperCamelCase = "pt"
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[Any]:
import torch
a_ = 'label' if 'label' in features[0].keys() else 'labels'
a_ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
a_ = self.tokenizer.pad(
UpperCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' if labels is None else None , )
if labels is None:
return batch
a_ = torch.tensor(batch['entity_ids'] ).shape[1]
a_ = self.tokenizer.padding_side
if padding_side == "right":
a_ = [
list(UpperCAmelCase__ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCAmelCase__ )) for label in labels
]
else:
a_ = [
[self.label_pad_token_id] * (sequence_length - len(UpperCAmelCase__ )) + list(UpperCAmelCase__ ) for label in labels
]
a_ = [feature['ner_tags'] for feature in features]
a_ = padding_tensor(UpperCAmelCase__ , -1 , UpperCAmelCase__ , UpperCAmelCase__ )
a_ = [feature['original_entity_spans'] for feature in features]
a_ = padding_tensor(UpperCAmelCase__ , (-1, -1) , UpperCAmelCase__ , UpperCAmelCase__ )
a_ = {k: torch.tensor(UpperCAmelCase__ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
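# --- Illustrative sketch of the right-sided label padding performed above,
# with readable names:
def _pad_right(labels, sequence_length, pad_id=-100):
    return [list(lab) + [pad_id] * (sequence_length - len(lab)) for lab in labels]


assert _pad_right([[1, 2]], 4) == [[1, 2, -100, -100]]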
| 697 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCAmelCase =logging.get_logger(__name__)
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = ["audio_values", "audio_mask"]
def __init__( self , UpperCAmelCase__=2048 , UpperCAmelCase__=1 , UpperCAmelCase__=[16, 16] , UpperCAmelCase__=128 , UpperCAmelCase__=4_4100 , UpperCAmelCase__=86 , UpperCAmelCase__=2048 , UpperCAmelCase__=0.0 , **UpperCAmelCase__ , ) -> Union[str, Any]:
super().__init__(
feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = spectrogram_length
a_ = num_channels
a_ = patch_size
a_ = feature_size // self.patch_size[1]
a_ = n_fft
a_ = sampling_rate // hop_length_to_sampling_rate
a_ = sampling_rate
a_ = padding_value
a_ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCAmelCase__ , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=UpperCAmelCase__ , norm='slaney' , mel_scale='slaney' , ).T
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> np.ndarray:
a_ = spectrogram(
UpperCAmelCase__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
a_ = log_spec[:, :-1]
a_ = log_spec - 2_0.0
a_ = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , **UpperCAmelCase__ , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
a_ = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
a_ = is_batched_numpy or (
isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a_ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ):
a_ = np.asarray(UpperCAmelCase__ , dtype=np.floataa )
elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a_ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
a_ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , UpperCAmelCase__ ):
a_ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
a_ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
a_ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
a_ = np.array(UpperCAmelCase__ ).astype(np.floataa )
# convert into correct format for padding
a_ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
a_ = np.ones([len(UpperCAmelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
a_ = padded_audio_features * self.padding_value
for i in range(len(UpperCAmelCase__ ) ):
a_ = audio_features[i]
a_ = feature
# return as BatchFeature
if return_attention_mask:
a_ = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
a_ = {'audio_values': padded_audio_features}
a_ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
return encoded_inputs
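# --- Numeric sketch of the dB normalisation in _np_extract_fbank_features
# above: subtracting 20 dB and applying clip(x / 40, -2, 0) + 1 maps every
# value into [-1, 1]:
import numpy as _np

_x = _np.array([-100.0, -20.0, 0.0, 20.0])
_y = _np.clip((_x - 20.0) / 40.0, -2.0, 0.0) + 1.0
assert _y.min() >= -1.0 and _y.max() <= 1.0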
| 697 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=snake_case ):
"""simple docstring"""
_UpperCamelCase = ["flax", "transformers"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> int:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> Union[str, Any]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> Tuple:
requires_backends(cls , ['flax', 'transformers'] )
class _snake_case ( metaclass=snake_case ):
"""simple docstring"""
_UpperCamelCase = ["flax", "transformers"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> Any:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> str:
requires_backends(cls , ['flax', 'transformers'] )
class _snake_case ( metaclass=snake_case ):
"""simple docstring"""
_UpperCamelCase = ["flax", "transformers"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> int:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> Tuple:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
class _snake_case ( metaclass=snake_case ):
"""simple docstring"""
_UpperCamelCase = ["flax", "transformers"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> Dict:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
| 697 |
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Optional[Any]:
"""simple docstring"""
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
a_ = len(set_a.intersection(_UpperCAmelCase ) )
if alternative_union:
a_ = len(_UpperCAmelCase ) + len(_UpperCAmelCase )
else:
a_ = len(set_a.union(_UpperCAmelCase ) )
return intersection / union
if isinstance(_UpperCAmelCase , (list, tuple) ) and isinstance(_UpperCAmelCase , (list, tuple) ):
a_ = [element for element in set_a if element in set_b]
if alternative_union:
a_ = len(_UpperCAmelCase ) + len(_UpperCAmelCase )
return len(_UpperCAmelCase ) / union
else:
a_ = set_a + [element for element in set_b if element not in set_a]
return len(_UpperCAmelCase ) / len(_UpperCAmelCase )
return len(_UpperCAmelCase ) / len(_UpperCAmelCase )
return None
if __name__ == "__main__":
__lowerCAmelCase ={"a", "b", "c", "d", "e"}
__lowerCAmelCase ={"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
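# --- Readable reference for the set/set case above (helper name is ours):
def _jaccard(set_a: set, set_b: set) -> float:
    return len(set_a & set_b) / len(set_a | set_b)


assert _jaccard({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}) == 0.375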
| 697 | 1 |
'''simple docstring'''
import math
import sys
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
if number != int(_UpperCAmelCase ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
a_ = [-1] * (number + 1)
a_ = 0
for i in range(1 , number + 1 ):
a_ = sys.maxsize
a_ = int(math.sqrt(_UpperCAmelCase ) )
for j in range(1 , root + 1 ):
a_ = 1 + answers[i - (j**2)]
a_ = min(_UpperCAmelCase , _UpperCAmelCase )
a_ = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
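# --- Quick check of the dynamic programme above: by Lagrange's four-square
# theorem the answer never exceeds 4; e.g. 12 = 4 + 4 + 4 needs three squares.
# (Kept as a comment; the name below is ours for the function defined above.)
# assert minimum_squares(12) == 3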
| 697 |
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
raise ValueError(
'starting number must be an integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
a_ = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 | 1 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=0.2 , UpperCAmelCase__=0.2 ) -> Dict:
a_ = bp_numa
a_ = bp_numa
a_ = bp_numa
a_ = conva_get[:2]
a_ = conva_get[2]
a_ = size_pa
a_ = rate_w
a_ = rate_t
a_ = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
a_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
a_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
a_ = -2 * np.random.rand(self.conva[1] ) + 1
a_ = -2 * np.random.rand(self.num_bpa ) + 1
a_ = -2 * np.random.rand(self.num_bpa ) + 1
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> str:
# save model dict with pickle
a_ = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(UpperCAmelCase__ , 'wb' ) as f:
pickle.dump(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''Model saved: {save_path}''' )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , UpperCAmelCase__ ) -> Any:
# read saved model
with open(UpperCAmelCase__ , 'rb' ) as f:
a_ = pickle.load(UpperCAmelCase__ ) # noqa: S301
a_ = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
a_ = model_dic.get('size_pooling1' )
a_ = model_dic.get('num_bp1' )
a_ = model_dic.get('num_bp2' )
a_ = model_dic.get('num_bp3' )
a_ = model_dic.get('rate_weight' )
a_ = model_dic.get('rate_thre' )
# create model instance
a_ = CNN(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# restore the model parameters
a_ = model_dic.get('w_conv1' )
a_ = model_dic.get('wkj' )
a_ = model_dic.get('vji' )
a_ = model_dic.get('thre_conv1' )
a_ = model_dic.get('thre_bp2' )
a_ = model_dic.get('thre_bp3' )
return conv_ins
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> List[str]:
return 1 / (1 + np.exp(-1 * x ))
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[int]:
return round(UpperCAmelCase__ , 3 )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Tuple:
# convolution process
a_ = convs[0]
a_ = convs[1]
a_ = np.shape(UpperCAmelCase__ )[0]
# get the data slice of original image data, data_focus
a_ = []
for i_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase__ ):
for j_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase__ ):
a_ = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(UpperCAmelCase__ )
# calculate the feature map of every single kernel and save it as a list of matrices
a_ = []
a_ = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(UpperCAmelCase__ ):
a_ = []
for i_focus in range(len(UpperCAmelCase__ ) ):
a_ = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(UpperCAmelCase__ ) )
a_ = np.asmatrix(UpperCAmelCase__ ).reshape(
UpperCAmelCase__ , UpperCAmelCase__ )
data_featuremap.append(UpperCAmelCase__ )
# expanding the data slice to one dimension
a_ = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(UpperCAmelCase__ ) )
a_ = np.asarray(UpperCAmelCase__ )
return focus_list, data_featuremap
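# Worked size check (numbers assumed, not from this file): a 28x28 input with a
# 3x3 kernel and conv_step=1 yields feature maps of (28 - 3) / 1 + 1 = 26 per
# side, which is exactly the side length used in the reshape above.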
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__="average_pool" ) -> str:
# pooling process
a_ = len(featuremaps[0] )
a_ = int(size_map / size_pooling )
a_ = []
for i_map in range(len(UpperCAmelCase__ ) ):
a_ = featuremaps[i_map]
a_ = []
for i_focus in range(0 , UpperCAmelCase__ , UpperCAmelCase__ ):
for j_focus in range(0 , UpperCAmelCase__ , UpperCAmelCase__ ):
a_ = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(UpperCAmelCase__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(UpperCAmelCase__ ) )
a_ = np.asmatrix(UpperCAmelCase__ ).reshape(UpperCAmelCase__ , UpperCAmelCase__ )
featuremap_pooled.append(UpperCAmelCase__ )
return featuremap_pooled
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[Any]:
# expanding three-dimensional data into a one-dimensional list
a_ = []
for i in range(len(UpperCAmelCase__ ) ):
a_ = np.shape(data[i] )
a_ = data[i].reshape(1 , shapes[0] * shapes[1] )
a_ = data_listed.getA().tolist()[0]
data_expanded.extend(UpperCAmelCase__ )
a_ = np.asarray(UpperCAmelCase__ )
return data_expanded
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[int]:
# expanding a matrix into a one-dimensional list
a_ = np.asarray(UpperCAmelCase__ )
a_ = np.shape(UpperCAmelCase__ )
a_ = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[int]:
a_ = []
a_ = 0
for i_map in range(UpperCAmelCase__ ):
a_ = np.ones((size_map, size_map) )
for i in range(0 , UpperCAmelCase__ , UpperCAmelCase__ ):
for j in range(0 , UpperCAmelCase__ , UpperCAmelCase__ ):
a_ = pd_pool[
i_pool
]
a_ = i_pool + 1
a_ = np.multiply(
UpperCAmelCase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(UpperCAmelCase__ )
return pd_all
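# Sketch of the upstream logic the mangled assignments above stand for: each
# pooled gradient value is broadcast back over its size_pooling x size_pooling
# window (the np.ones block), then scaled elementwise by the sigmoid
# derivative out_map * (1 - out_map) to backpropagate through average pooling.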
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=bool ) -> List[str]:
# model training
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(UpperCAmelCase__ )) )
print((' - - Shape: Teach_Data ', np.shape(UpperCAmelCase__ )) )
a_ = 0
a_ = []
a_ = 1_0000
while rp < n_repeat and mse >= error_accuracy:
a_ = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(UpperCAmelCase__ ) ):
# print('------------Learning Image: %d--------------'%p)
a_ = np.asmatrix(datas_train[p] )
a_ = np.asarray(datas_teach[p] )
a_ , a_ = self.convolute(
UpperCAmelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a_ = self.pooling(UpperCAmelCase__ , self.size_poolinga )
a_ = np.shape(UpperCAmelCase__ )
a_ = self._expand(UpperCAmelCase__ )
a_ = data_bp_input
a_ = np.dot(UpperCAmelCase__ , self.vji.T ) - self.thre_bpa
a_ = self.sig(UpperCAmelCase__ )
a_ = np.dot(UpperCAmelCase__ , self.wkj.T ) - self.thre_bpa
a_ = self.sig(UpperCAmelCase__ )
# --------------Model Learning ------------------------
# calculate error and gradient---------------
a_ = np.multiply(
(data_teach - bp_outa) , np.multiply(UpperCAmelCase__ , (1 - bp_outa) ) )
a_ = np.multiply(
np.dot(UpperCAmelCase__ , self.wkj ) , np.multiply(UpperCAmelCase__ , (1 - bp_outa) ) )
a_ = np.dot(UpperCAmelCase__ , self.vji )
a_ = pd_i_all / (self.size_poolinga * self.size_poolinga)
a_ = pd_conva_pooled.T.getA().tolist()
a_ = self._calculate_gradient_from_pool(
UpperCAmelCase__ , UpperCAmelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
a_ = self._expand_mat(pd_conva_all[k_conv] )
a_ = self.rate_weight * np.dot(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
a_ = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# fully connected layers
a_ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
a_ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
a_ = self.thre_bpa - pd_k_all * self.rate_thre
a_ = self.thre_bpa - pd_j_all * self.rate_thre
# accumulate the summed error over every single image
a_ = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
a_ = rp + 1
a_ = error_count / patterns
all_mse.append(UpperCAmelCase__ )
def draw_error():
a_ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(UpperCAmelCase__ , '+-' )
plt.plot(UpperCAmelCase__ , 'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(UpperCAmelCase__ , alpha=0.5 )
plt.show()
print('------------------Training Complete---------------------' )
print((' - - Training epoch: ', rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[Any]:
# model prediction
a_ = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(UpperCAmelCase__ )) )
for p in range(len(UpperCAmelCase__ ) ):
a_ = np.asmatrix(datas_test[p] )
a_ , a_ = self.convolute(
UpperCAmelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a_ = self.pooling(UpperCAmelCase__ , self.size_poolinga )
a_ = self._expand(UpperCAmelCase__ )
a_ = data_bp_input
a_ = bp_outa * self.vji.T - self.thre_bpa
a_ = self.sig(UpperCAmelCase__ )
a_ = bp_outa * self.wkj.T - self.thre_bpa
a_ = self.sig(UpperCAmelCase__ )
produce_out.extend(bp_outa.getA().tolist() )
a_ = [list(map(self.do_round , UpperCAmelCase__ ) ) for each in produce_out]
return np.asarray(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[int]:
# return the image data after the convolution process so we can inspect it
a_ = np.asmatrix(UpperCAmelCase__ )
a_ , a_ = self.convolute(
UpperCAmelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a_ = self.pooling(UpperCAmelCase__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
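# A hypothetical usage sketch (argument names and values assumed, not part of
# this file; every method here shares the same mangled name, so the upstream
# names save_model / ReadModel are used purely for readability):
# cnn = CNN(bp_num1, bp_num2, bp_num3, conv1_setting, pooling_size, rate_w, rate_t)
# cnn.save_model('model.pkl') # pickles the weight/threshold dict built above
# restored = CNN.ReadModel('model.pkl') # the classmethod rebuilds the instance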
| 697 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _snake_case ( snake_case ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> str:
super().__init__(
features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ , streaming=UpperCAmelCase__ , num_proc=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = Generator(
cache_dir=UpperCAmelCase__ , features=UpperCAmelCase__ , generator=UpperCAmelCase__ , gen_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ , )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
# Build iterable dataset
if self.streaming:
a_ = self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
a_ = None
a_ = None
a_ = None
a_ = None
self.builder.download_and_prepare(
download_config=UpperCAmelCase__ , download_mode=UpperCAmelCase__ , verification_mode=UpperCAmelCase__ , base_path=UpperCAmelCase__ , num_proc=self.num_proc , )
a_ = self.builder.as_dataset(
split='train' , verification_mode=UpperCAmelCase__ , in_memory=self.keep_in_memory )
return dataset
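# A minimal usage sketch (assumes the upstream `datasets` API that this class
# mirrors - it is the backend of Dataset.from_generator):
# from datasets import Dataset
# def gen():
# yield {"text": "hello"}
# ds = Dataset.from_generator(gen) # internally builds this input stream and calls read()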
| 697 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "blenderbot-small"
_UpperCamelCase = ["past_key_values"]
_UpperCamelCase = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , UpperCAmelCase__=5_0265 , UpperCAmelCase__=512 , UpperCAmelCase__=8 , UpperCAmelCase__=2048 , UpperCAmelCase__=16 , UpperCAmelCase__=8 , UpperCAmelCase__=2048 , UpperCAmelCase__=16 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__="gelu" , UpperCAmelCase__=512 , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1 , UpperCAmelCase__=False , UpperCAmelCase__=0 , UpperCAmelCase__=1 , UpperCAmelCase__=2 , UpperCAmelCase__=2 , **UpperCAmelCase__ , ) -> str:
a_ = vocab_size
a_ = max_position_embeddings
a_ = d_model
a_ = encoder_ffn_dim
a_ = encoder_layers
a_ = encoder_attention_heads
a_ = decoder_ffn_dim
a_ = decoder_layers
a_ = decoder_attention_heads
a_ = dropout
a_ = attention_dropout
a_ = activation_dropout
a_ = activation_function
a_ = init_std
a_ = encoder_layerdrop
a_ = decoder_layerdrop
a_ = use_cache
a_ = encoder_layers
a_ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
class _snake_case ( snake_case ):
"""simple docstring"""
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
a_ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
a_ = {0: 'batch'}
a_ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
a_ = {0: 'batch', 1: 'decoder_sequence'}
a_ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
a_ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
a_ , a_ = self.num_layers
for i in range(UpperCAmelCase__ ):
a_ = {0: 'batch', 2: 'past_sequence + sequence'}
a_ = {0: 'batch', 2: 'past_sequence + sequence'}
else:
a_ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
a_ = super().outputs
else:
a_ = super(UpperCAmelCase__ , self ).outputs
if self.use_past:
a_ , a_ = self.num_layers
for i in range(UpperCAmelCase__ ):
a_ = {0: 'batch', 2: 'past_sequence + sequence'}
a_ = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = -1 , UpperCAmelCase__ = -1 , UpperCAmelCase__ = False , UpperCAmelCase__ = None , ) -> Mapping[str, Any]:
a_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Generate decoder inputs
a_ = seq_length if not self.use_past else 1
a_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
a_ = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
a_ = dict(**UpperCAmelCase__ , **UpperCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
a_ , a_ = common_inputs['input_ids'].shape
a_ = common_inputs['decoder_input_ids'].shape[1]
a_ , a_ = self.num_attention_heads
a_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a_ = decoder_seq_length + 3
a_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
a_ = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCAmelCase__ , UpperCAmelCase__ )] , dim=1 )
a_ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
a_ , a_ = self.num_layers
a_ = min(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = max(UpperCAmelCase__ , UpperCAmelCase__ ) - min_num_layers
a_ = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCAmelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCAmelCase__ ),
torch.zeros(UpperCAmelCase__ ),
torch.zeros(UpperCAmelCase__ ),
torch.zeros(UpperCAmelCase__ ),
) )
# TODO: test this.
a_ = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCAmelCase__ , UpperCAmelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCAmelCase__ ), torch.zeros(UpperCAmelCase__ )) )
return common_inputs
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = -1 , UpperCAmelCase__ = -1 , UpperCAmelCase__ = False , UpperCAmelCase__ = None , ) -> Mapping[str, Any]:
a_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
a_ , a_ = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
a_ = seqlen + 2
a_ , a_ = self.num_layers
a_ , a_ = self.num_attention_heads
a_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a_ = common_inputs['attention_mask'].dtype
a_ = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCAmelCase__ , UpperCAmelCase__ , dtype=UpperCAmelCase__ )] , dim=1 )
a_ = [
(torch.zeros(UpperCAmelCase__ ), torch.zeros(UpperCAmelCase__ )) for _ in range(UpperCAmelCase__ )
]
return common_inputs
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = -1 , UpperCAmelCase__ = -1 , UpperCAmelCase__ = False , UpperCAmelCase__ = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a_ = compute_effective_axis_dimension(
UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a_ = tokenizer.num_special_tokens_to_add(UpperCAmelCase__ )
a_ = compute_effective_axis_dimension(
UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase__ )
# Generate dummy inputs according to compute batch and sequence
a_ = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
a_ = dict(tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ ) )
return common_inputs
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = -1 , UpperCAmelCase__ = -1 , UpperCAmelCase__ = False , UpperCAmelCase__ = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
a_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
elif self.task == "causal-lm":
a_ = self._generate_dummy_inputs_for_causal_lm(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
else:
a_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
return common_inputs
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> str:
if self.task in ["default", "seq2seq-lm"]:
a_ = super()._flatten_past_key_values_(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
else:
a_ = super(UpperCAmelCase__ , self )._flatten_past_key_values_(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
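# Note on the dummy past_key_values built above (a sketch of the ONNX export
# convention, not specific to this file): each layer contributes key/value
# tensors shaped (batch, num_attention_heads, past_sequence_length,
# hidden_size // num_attention_heads), which is exactly what the
# encoder_shape / decoder_shape tuples assembled earlier encode.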
| 697 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowerCAmelCase =numpy.array([0, 0])
__lowerCAmelCase =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase =numpy.array([1, 0])
__lowerCAmelCase =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> list[numpy.ndarray]:
"""simple docstring"""
a_ = initial_vectors
for _ in range(_UpperCAmelCase ):
a_ = iteration_step(_UpperCAmelCase )
return vectors
def a ( _UpperCAmelCase ) -> list[numpy.ndarray]:
"""simple docstring"""
a_ = []
for i, start_vector in enumerate(vectors[:-1] ):
a_ = vectors[i + 1]
new_vectors.append(_UpperCAmelCase )
a_ = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> numpy.ndarray:
"""simple docstring"""
a_ = numpy.radians(_UpperCAmelCase )
a_ , a_ = numpy.cos(_UpperCAmelCase ), numpy.sin(_UpperCAmelCase )
a_ = numpy.array(((c, -s), (s, c)) )
return numpy.dot(_UpperCAmelCase , _UpperCAmelCase )
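# Worked example (input assumed): rotate(numpy.array([1, 0]), 90) builds the
# rotation matrix [[cos 90, -sin 90], [sin 90, cos 90]] and returns
# approximately [0, 1], i.e. the unit x-vector turned counterclockwise.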
def a ( _UpperCAmelCase ) -> None:
"""simple docstring"""
a_ = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
a_ , a_ = zip(*_UpperCAmelCase )
plt.plot(_UpperCAmelCase , _UpperCAmelCase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 697 | 1 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__lowerCAmelCase =logging.getLogger(__name__)
class _snake_case ( snake_case ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None ) -> int:
super().__init__(
UpperCAmelCase__ , question_encoder_tokenizer=UpperCAmelCase__ , generator_tokenizer=UpperCAmelCase__ , index=UpperCAmelCase__ , init_retrieval=UpperCAmelCase__ , )
a_ = None
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Dict:
logger.info('initializing retrieval' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('dist initialized' )
# needs to be set manually
a_ = self._infer_socket_ifname()
# avoid clash with the NCCL port
a_ = str(distributed_port + 1 )
a_ = dist.new_group(ranks=UpperCAmelCase__ , backend='gloo' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('dist not initialized / main' )
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return dist.get_rank(group=self.process_group ) == 0
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=torch.floataa ) -> int:
a_ = torch.empty(UpperCAmelCase__ , dtype=UpperCAmelCase__ )
dist.scatter(UpperCAmelCase__ , src=0 , scatter_list=UpperCAmelCase__ , group=self.process_group )
return target_tensor
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
a_ = next((addr for addr in addrs if addr.startswith('e' )) , UpperCAmelCase__ )
return ifname
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
a_ , a_ = self._main_retrieve(UpperCAmelCase__ , UpperCAmelCase__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase__ )
# distributed training
a_ = dist.get_world_size(group=self.process_group )
# gather logic
a_ = None
if self._is_main():
a_ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(UpperCAmelCase__ )]
dist.gather(torch.tensor(UpperCAmelCase__ ) , dst=0 , gather_list=UpperCAmelCase__ , group=self.process_group )
# scatter logic
a_ = question_hidden_states.shape[0]
a_ = []
a_ = []
if self._is_main():
assert len(UpperCAmelCase__ ) == world_size
a_ , a_ = self._main_retrieve(torch.cat(UpperCAmelCase__ ).numpy() , UpperCAmelCase__ )
a_ , a_ = torch.tensor(UpperCAmelCase__ ), torch.tensor(UpperCAmelCase__ )
a_ = self._chunk_tensor(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = self._chunk_tensor(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = self._scattered(UpperCAmelCase__ , [n_queries, n_docs] , target_type=torch.intaa )
a_ = self._scattered(UpperCAmelCase__ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCAmelCase__ )
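# Flow sketch for the distributed branch above: every worker gathers its
# question embeddings to rank 0, rank 0 runs one index query for the whole
# batch, chunks the doc ids and doc embeddings per worker, and scatters each
# worker its own slice; the doc dicts are then looked up locally.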
| 697 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
if (
(cp >= 0X4_e00 and cp <= 0X9_fff) # CJK Unified Ideographs
or (cp >= 0X3_400 and cp <= 0X4_dbf) # CJK Unified Ideographs Extension A
or (cp >= 0X20_000 and cp <= 0X2a_6df) # CJK Unified Ideographs Extension B
or (cp >= 0X2a_700 and cp <= 0X2b_73f) # CJK Unified Ideographs Extension C
or (cp >= 0X2b_740 and cp <= 0X2b_81f) # CJK Unified Ideographs Extension D
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) # CJK Unified Ideographs Extension E
or (cp >= 0Xf_900 and cp <= 0Xf_aff) # CJK Compatibility Ideographs
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) # CJK Compatibility Ideographs Supplement
):
return True
return False
def a ( _UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
for char in word:
a_ = ord(_UpperCAmelCase )
if not _is_chinese_char(_UpperCAmelCase ):
return 0
return 1
def a ( _UpperCAmelCase ) -> Tuple:
"""simple docstring"""
a_ = set()
for token in tokens:
a_ = len(_UpperCAmelCase ) > 1 and is_chinese(_UpperCAmelCase )
if chinese_word:
word_set.add(_UpperCAmelCase )
a_ = list(_UpperCAmelCase )
return word_list
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
a_ = max([len(_UpperCAmelCase ) for w in chinese_word_set] )
a_ = bert_tokens
a_ , a_ = 0, len(_UpperCAmelCase )
while start < end:
a_ = True
if is_chinese(bert_word[start] ):
a_ = min(end - start , _UpperCAmelCase )
for i in range(_UpperCAmelCase , 1 , -1 ):
a_ = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
a_ = '##' + bert_word[j]
a_ = start + i
a_ = False
break
if single_word:
start += 1
return bert_word
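# Worked example (tokens assumed): with bert_tokens = ['中', '国', '人'] and
# chinese_word_set = {'中国'}, the loop finds the longest whole word starting
# at position 0 and returns ['中', '##国', '人'] - the '##' marks sub-words
# that belong to the same whole word, for whole word masking.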
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
"""simple docstring"""
a_ = []
for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
a_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['cws'] ).cws
a_ = [get_chinese_word(_UpperCAmelCase ) for r in res]
ltp_res.extend(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
a_ = []
for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
a_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=5_1_2 )
bert_res.extend(res['input_ids'] )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
a_ = []
for input_ids, chinese_word in zip(_UpperCAmelCase , _UpperCAmelCase ):
a_ = []
for id in input_ids:
a_ = bert_tokenizer._convert_id_to_token(_UpperCAmelCase )
input_tokens.append(_UpperCAmelCase )
a_ = add_sub_symbol(_UpperCAmelCase , _UpperCAmelCase )
a_ = []
# We only save the positions of Chinese sub-words that start with ##, which means they are part of a whole word.
for i, token in enumerate(_UpperCAmelCase ):
if token[:2] == "##":
a_ = token[2:]
# save the Chinese tokens' positions
if len(_UpperCAmelCase ) == 1 and _is_chinese_char(ord(_UpperCAmelCase ) ):
ref_id.append(_UpperCAmelCase )
ref_ids.append(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
return ref_ids
def a ( _UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
a_ = f.readlines()
a_ = [line.strip() for line in data if len(_UpperCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
a_ = LTP(args.ltp ) # faster on a GPU device
a_ = BertTokenizer.from_pretrained(args.bert )
a_ = prepare_ref(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
a_ = [json.dumps(_UpperCAmelCase ) + '\n' for ref in ref_ids]
f.writelines(_UpperCAmelCase )
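# Output format sketch: each line of save_path is the JSON list of positions
# (one list per input sentence) of the sub-tokens that were marked with '##'
# above, e.g. '[3, 4]\n'; these lists serve as the chinese_ref input for
# whole word masking during language-model pretraining.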
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
__lowerCAmelCase =parser.parse_args()
main(args)
| 697 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = tempfile.mkdtemp()
a_ = BlipImageProcessor()
a_ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' )
a_ = BlipProcessor(UpperCAmelCase__ , UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase__ ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).tokenizer
def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase__ ) -> List[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a_ = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
a_ = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
a_ = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
a_ = self.prepare_image_inputs()
a_ = image_processor(UpperCAmelCase__ , return_tensors='np' )
a_ = processor(images=UpperCAmelCase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
a_ = 'lower newer'
a_ = processor(text=UpperCAmelCase__ )
a_ = tokenizer(UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
a_ = 'lower newer'
a_ = self.prepare_image_inputs()
a_ = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase__ ):
processor()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
a_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a_ = processor.batch_decode(UpperCAmelCase__ )
a_ = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
a_ = 'lower newer'
a_ = self.prepare_image_inputs()
a_ = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 697 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase ={
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__lowerCAmelCase ={
"yjernite/retribert-base-uncased": 512,
}
__lowerCAmelCase ={
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase = RetriBertTokenizer
_UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__="[UNK]" , UpperCAmelCase__="[SEP]" , UpperCAmelCase__="[PAD]" , UpperCAmelCase__="[CLS]" , UpperCAmelCase__="[MASK]" , UpperCAmelCase__=True , UpperCAmelCase__=None , **UpperCAmelCase__ , ) -> int:
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCAmelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase__ ) != tokenize_chinese_chars
):
a_ = getattr(UpperCAmelCase__ , normalizer_state.pop('type' ) )
a_ = do_lower_case
a_ = strip_accents
a_ = tokenize_chinese_chars
a_ = normalizer_class(**UpperCAmelCase__ )
a_ = do_lower_case
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=None ) -> str:
a_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]:
a_ = [self.sep_token_id]
a_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Tuple[str]:
a_ = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
| 697 | 1 |
'''simple docstring'''
import math
import qiskit
def a ( _UpperCAmelCase = 1 , _UpperCAmelCase = 1 , _UpperCAmelCase = 1 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
if (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
or isinstance(_UpperCAmelCase , _UpperCAmelCase )
or isinstance(_UpperCAmelCase , _UpperCAmelCase )
):
raise TypeError('inputs must be integers.' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('inputs must be positive.' )
if (
(math.floor(_UpperCAmelCase ) != input_a)
or (math.floor(_UpperCAmelCase ) != input_a)
or (math.floor(_UpperCAmelCase ) != carry_in)
):
raise ValueError('inputs must be exact integers.' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('inputs must be less or equal to 2.' )
# build registers
a_ = qiskit.QuantumRegister(4 , 'qr' )
a_ = qiskit.ClassicalRegister(2 , 'cr' )
# list the entries
a_ = [input_a, input_a, carry_in]
a_ = qiskit.QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(_UpperCAmelCase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(_UpperCAmelCase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(_UpperCAmelCase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , _UpperCAmelCase ) # measure the last two qubits
a_ = qiskit.Aer.get_backend('aer_simulator' )
a_ = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1_0_0_0 )
return job.result().get_counts(_UpperCAmelCase )
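# Worked example (expected counts follow from the circuit logic above):
# quantum_full_adder(1, 1, 1) adds 1 + 1 + 1 = 3 = 0b11, so all 1000 shots
# should measure '11' - qubit 2 holds the sum bit and qubit 3 the carry bit.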
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 697 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "lilt"
def __init__( self , UpperCAmelCase__=3_0522 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=0 , UpperCAmelCase__="absolute" , UpperCAmelCase__=None , UpperCAmelCase__=4 , UpperCAmelCase__=1024 , **UpperCAmelCase__ , ) -> Optional[Any]:
super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = classifier_dropout
a_ = channel_shrink_ratio
a_ = max_ad_position_embeddings
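# Usage note (a sketch based on the upstream LiLT design, where this class is
# LiltConfig): the layout flow runs at hidden_size // channel_shrink_ratio
# (768 // 4 = 192 with the defaults above), and max_ad_position_embeddings
# bounds the normalized bounding-box coordinates (1024 upstream, i.e. 2D
# positions in [0, 1023]).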
| 697 | 1 |
'''simple docstring'''
def a ( _UpperCAmelCase = 1_0_0_0 ) -> int:
"""simple docstring"""
a_ = 2**power
a_ = 0
while n:
a_ , a_ = r + n % 1_0, n // 1_0
return r
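# Worked example: solution(15) computes 2**15 = 32768 and digit-sums it to
# 3 + 2 + 7 + 6 + 8 = 26; the default solution(1000) answers Project Euler 16.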
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 697 |
'''simple docstring'''
from __future__ import annotations
def a ( _UpperCAmelCase ) -> bool:
"""simple docstring"""
a_ = len(_UpperCAmelCase )
# We need to create a solutions matrix to save the path.
a_ = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )]
a_ = run_maze(_UpperCAmelCase , 0 , 0 , _UpperCAmelCase )
if solved:
print('\n'.join(str(_UpperCAmelCase ) for row in solutions ) )
else:
print('No solution exists!' )
return solved
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> bool:
"""simple docstring"""
a_ = len(_UpperCAmelCase )
# Final check point.
if i == j == (size - 1):
a_ = 1
return True
a_ = (not i < 0) and (not j < 0) # Check lower bounds
a_ = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
a_ = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# mark the current cell as visited
a_ = 1
# check for directions
if (
run_maze(_UpperCAmelCase , i + 1 , _UpperCAmelCase , _UpperCAmelCase )
or run_maze(_UpperCAmelCase , _UpperCAmelCase , j + 1 , _UpperCAmelCase )
or run_maze(_UpperCAmelCase , i - 1 , _UpperCAmelCase , _UpperCAmelCase )
or run_maze(_UpperCAmelCase , _UpperCAmelCase , j - 1 , _UpperCAmelCase )
):
return True
a_ = 0
return False
return False
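# Worked sketch (maze assumed): for maze = [[0, 1], [0, 0]] the search marks
# (0,0), recurses down to (1,0), then right to the goal (1,1), so the solver
# prints [1, 0] and [1, 1]; the four recursive calls try down, right, up,
# left in that order, and the cell is un-marked (reset to 0) when all fail.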
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 | 1 |
'''simple docstring'''
def a ( _UpperCAmelCase ) -> list:
"""simple docstring"""
a_ = len(_UpperCAmelCase )
for i in range(1 , _UpperCAmelCase ):
a_ = collection[i]
a_ = 0
a_ = i - 1
while low <= high:
a_ = (low + high) // 2
if val < collection[mid]:
a_ = mid - 1
else:
a_ = mid + 1
for j in range(_UpperCAmelCase , _UpperCAmelCase , -1 ):
a_ = collection[j - 1]
a_ = val
return collection
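# Worked example: binary_insertion_sort([5, 2, 4]) -> [2, 4, 5]; for each
# element the binary search above finds the insertion index in the sorted
# prefix, then the for-loop shifts the larger elements one slot to the right.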
if __name__ == "__main__":
__lowerCAmelCase =input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase =[int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 697 |
'''simple docstring'''
__lowerCAmelCase ={
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor (meter)
__lowerCAmelCase ={
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> float:
"""simple docstring"""
a_ = from_type.lower().strip('s' )
a_ = to_type.lower().strip('s' )
a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase )
a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase )
if from_sanitized not in METRIC_CONVERSION:
a_ = (
F'''Invalid \'from_type\' value: {from_type!r}.\n'''
F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}'''
)
raise ValueError(_UpperCAmelCase )
if to_sanitized not in METRIC_CONVERSION:
a_ = (
F'''Invalid \'to_type\' value: {to_type!r}.\n'''
F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}'''
)
raise ValueError(_UpperCAmelCase )
a_ = METRIC_CONVERSION[from_sanitized]
a_ = METRIC_CONVERSION[to_sanitized]
a_ = 1
if from_exponent > to_exponent:
a_ = from_exponent - to_exponent
else:
a_ = -(to_exponent - from_exponent)
return value * pow(1_0 , _UpperCAmelCase )
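# Worked example: converting 4 kilometers to meters uses exponents 3 and 0,
# so the function returns 4 * 10 ** 3 = 4000.0; converting the other way
# uses a negative exponent, e.g. 4 meters -> 0.004 kilometers.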
if __name__ == "__main__":
from doctest import testmod
testmod()
| 697 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _snake_case ( snake_case ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCAmelCase__ , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(UpperCAmelCase__ , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(UpperCAmelCase__ , 'num_attention_heads' ) )
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=13 , UpperCAmelCase__=32 , UpperCAmelCase__=2 , UpperCAmelCase__=3 , UpperCAmelCase__=640 , UpperCAmelCase__=4 , UpperCAmelCase__="silu" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=10 , UpperCAmelCase__=None , ) -> Union[str, Any]:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = last_hidden_size
a_ = num_attention_heads
a_ = hidden_act
a_ = conv_kernel_size
a_ = output_stride
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = classifier_dropout_prob
a_ = use_labels
a_ = is_training
a_ = num_labels
a_ = initializer_range
a_ = scope
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.num_labels )
a_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Tuple:
a_ = MobileViTModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[int]:
a_ = self.num_labels
a_ = MobileViTForImageClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Tuple:
a_ = self.num_labels
a_ = MobileViTForSemanticSegmentation(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
a_ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ , a_ = config_and_inputs
a_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCamelCase = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = MobileViTModelTester(self )
a_ = MobileViTConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
def check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
a_ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
a_ = outputs.hidden_states
a_ = 5
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
a_ = 2
for i in range(len(UpperCAmelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase__ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = MobileViTModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def a ( ) -> List[str]:
"""simple docstring"""
a_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(UpperCAmelCase__ )
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
a_ = model(**UpperCAmelCase__ )
# verify the logits
a_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
a_ = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
a_ = model.to(UpperCAmelCase__ )
a_ = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
a_ = prepare_img()
a_ = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
a_ = model(**UpperCAmelCase__ )
a_ = outputs.logits
# verify the logits
a_ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , UpperCAmelCase__ )
a_ = torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
] , device=UpperCAmelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
a_ = model.to(UpperCAmelCase__ )
a_ = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
a_ = prepare_img()
a_ = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
a_ = model(**UpperCAmelCase__ )
a_ = outputs.logits.detach().cpu()
a_ = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase__ , target_sizes=[(50, 60)] )
a_ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , UpperCAmelCase__ )
a_ = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase__ )
a_ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , UpperCAmelCase__ )
| 697 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase ="naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
a_ = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
a_ = self.processor.tokenajson(UpperCAmelCase__ )
self.assertDictEqual(UpperCAmelCase__ , UpperCAmelCase__ )
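# Note (a sketch of the upstream behaviour): tokenajson corresponds to Donut's
# token2json - it parses the <s_field>...</s_field> tag stream emitted by the
# decoder back into the nested dict above, with <sep/> separating repeated
# groups such as the two nicknames.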
| 697 | 1 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
"""simple docstring"""
a_ = tmp_path / 'cache'
a_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a_ = JsonDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase ).read()
_check_json_dataset(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
a_ = tmp_path / 'cache'
a_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a_ = features.copy() if features else default_expected_features
a_ = (
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
a_ = JsonDatasetReader(_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_json_dataset(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
"""simple docstring"""
a_ = tmp_path / 'cache'
a_ = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
a_ = features.copy() if features else default_expected_features
a_ = (
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
a_ = JsonDatasetReader(_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
"""simple docstring"""
a_ = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
a_ = features.copy()
a_ = (
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
a_ = tmp_path / 'cache'
a_ = JsonDatasetReader(_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
a_ = tmp_path / 'cache'
a_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a_ = JsonDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase , split=_UpperCAmelCase ).read()
_check_json_dataset(_UpperCAmelCase , _UpperCAmelCase )
assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('path_type' , [str, list] )
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
"""simple docstring"""
if issubclass(_UpperCAmelCase , _UpperCAmelCase ):
a_ = jsonl_path
elif issubclass(_UpperCAmelCase , _UpperCAmelCase ):
a_ = [jsonl_path]
a_ = tmp_path / 'cache'
a_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a_ = JsonDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_json_dataset(_UpperCAmelCase , _UpperCAmelCase )
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=("train",) ) -> Dict:
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
for split in splits:
a_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
"""simple docstring"""
a_ = tmp_path / 'cache'
a_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a_ = JsonDatasetReader({'train': jsonl_path} , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase ).read()
_check_json_datasetdict(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
a_ = tmp_path / 'cache'
a_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a_ = features.copy() if features else default_expected_features
a_ = (
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
a_ = JsonDatasetReader({'train': jsonl_path} , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_json_datasetdict(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
"""simple docstring"""
if split:
a_ = {split: jsonl_path}
else:
a_ = 'train'
a_ = {'train': jsonl_path, 'test': jsonl_path}
a_ = tmp_path / 'cache'
a_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a_ = JsonDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_json_datasetdict(_UpperCAmelCase , _UpperCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a ( _UpperCAmelCase ) -> Tuple:
"""simple docstring"""
return json.load(_UpperCAmelCase )
def a ( _UpperCAmelCase ) -> str:
"""simple docstring"""
return [json.loads(_UpperCAmelCase ) for line in buffer]
class _snake_case :
"""simple docstring"""
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase__ , UpperCAmelCase__ , lines=UpperCAmelCase__ ).write()
buffer.seek(0 )
a_ = load_json_function(UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert isinstance(exported_content[0] , UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase__ , UpperCAmelCase__ , lines=UpperCAmelCase__ , orient=UpperCAmelCase__ ).write()
buffer.seek(0 )
a_ = load_json(UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCAmelCase__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCAmelCase__ ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase__ , UpperCAmelCase__ , lines=UpperCAmelCase__ , num_proc=2 ).write()
buffer.seek(0 )
a_ = load_json_function(UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert isinstance(exported_content[0] , UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase__ , UpperCAmelCase__ , lines=UpperCAmelCase__ , orient=UpperCAmelCase__ , num_proc=2 ).write()
buffer.seek(0 )
a_ = load_json(UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCAmelCase__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCAmelCase__ ) == 10
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[int]:
with pytest.raises(UpperCAmelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Tuple:
a_ = tmp_path_factory.mktemp('data' ) / F'''test.json.{extension}'''
a_ = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCAmelCase__ , UpperCAmelCase__ , compression=UpperCAmelCase__ ).write()
with fsspec.open(UpperCAmelCase__ , 'rb' , compression='infer' ) as f:
a_ = f.read()
with fsspec.open(UpperCAmelCase__ , 'rb' , compression='infer' ) as f:
a_ = f.read()
assert exported_content == original_content
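# --- added example ---------------------------------------------------------
# Hedged round-trip sketch of the reader/writer pair exercised above: write a
# tiny in-memory Dataset as JSON lines and parse each line back. Column names
# and values are illustrative, not taken from the fixtures.
def _json_roundtrip_sketch():
    ds = Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [1, 2]})
    with io.BytesIO() as buffer:
        JsonDatasetWriter(ds, buffer, lines=True).write()
        buffer.seek(0)
        rows = [json.loads(line) for line in buffer]
    assert [row['col_1'] for row in rows] == ['a', 'b']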
| 697 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="resnet50" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , ) -> Optional[Any]:
a_ = parent
a_ = out_indices if out_indices is not None else [4]
a_ = stage_names
a_ = out_features
a_ = backbone
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = use_pretrained_backbone
a_ = is_training
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = self.get_config()
return config, pixel_values
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = TimmBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
a_ = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ = config_and_inputs
a_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (TimmBackbone,) if is_torch_available() else ()
_UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = TimmBackboneModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = 'resnet18'
a_ = 'microsoft/resnet-18'
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
a_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
a_ = self.all_model_classes[0]
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = model(**UpperCAmelCase__ )
a_ = outputs[0][-1]
# Encoder-/Decoder-only models
a_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = None
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = False
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
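# --- added example ---------------------------------------------------------
# Hedged usage sketch mirroring the integration test above: load a small timm
# backbone through AutoBackbone and inspect its feature maps. The checkpoint
# name and out_indices are the ones the test itself uses.
def _backbone_usage_sketch():
    backbone = AutoBackbone.from_pretrained('resnet18', use_timm_backbone=True, out_indices=[1, 2, 3])
    pixel_values = torch.randn(1, 3, 224, 224)
    outputs = backbone(pixel_values)
    # one feature map per requested stage, with channel widths from .channels
    assert len(outputs.feature_maps) == len(backbone.channels) == 3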
| 697 | 1 |
'''simple docstring'''
import qiskit
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> qiskit.result.counts.Counts:
"""simple docstring"""
a_ = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
a_ = qiskit.QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
a_ = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_UpperCAmelCase )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
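# --- added example ---------------------------------------------------------
# Hedged follow-up sketch: the same pattern extended to an entangled two-qubit
# (Bell state) measurement. It assumes the same legacy qiskit API used above
# (qiskit.execute was removed in qiskit 1.0).
def bell_state_measure(shots: int = 1_0_0_0) -> qiskit.result.counts.Counts:
    backend = qiskit.Aer.get_backend('aer_simulator')
    circuit = qiskit.QuantumCircuit(2, 2)
    circuit.h(0)  # put qubit 0 into superposition
    circuit.cx(0, 1)  # entangle qubit 1 with qubit 0
    circuit.measure([0, 1], [0, 1])  # counts should land on '00' and '11' only
    job = qiskit.execute(circuit, backend, shots=shots)
    return job.result().get_counts(circuit)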
| 697 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]:
a_ = size if size is not None else {'height': 18, 'width': 18}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = apply_ocr
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = LayoutLMvaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
# with apply_OCR = True
a_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
a_ = Image.open(ds[0]['file'] ).convert('RGB' )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
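# --- added example ---------------------------------------------------------
# Hedged usage sketch matching the last assertion above: run the image
# processor with OCR disabled on a synthetic image; only the default 224x224
# resize is demonstrated.
def _no_ocr_sketch():
    image_processor = LayoutLMvaImageProcessor(apply_ocr=False)
    image = Image.fromarray(np.zeros((100, 100, 3), dtype=np.uint8))
    encoding = image_processor(image, return_tensors='pt')
    assert tuple(encoding.pixel_values.shape) == (1, 3, 224, 224)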
| 697 | 1 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
a_ = AlbertConfig.from_json_file(_UpperCAmelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
a_ = AlbertForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , _UpperCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
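# --- added example ---------------------------------------------------------
# Hedged example invocation of the script above (the script file name and all
# paths are placeholders, not taken from this file):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin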
| 697 |
'''simple docstring'''
import math
def a ( _UpperCAmelCase ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( _UpperCAmelCase = 1_0_0_0_1 ) -> int:
"""simple docstring"""
try:
a_ = int(_UpperCAmelCase )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
a_ = []
a_ = 2
while len(_UpperCAmelCase ) < nth:
if is_prime(_UpperCAmelCase ):
primes.append(_UpperCAmelCase )
num += 1
else:
num += 1
return primes[len(_UpperCAmelCase ) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
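# --- added example ---------------------------------------------------------
# Hedged sanity checks, calling the helpers by the names used at their call
# sites above (the defs appear under mutated names in this dump). Known
# values: the first six primes, and Project Euler 7's worked example
# "the 6th prime is 13".
assert [n for n in range(2, 14) if is_prime(n)] == [2, 3, 5, 7, 11, 13]
assert solution(6) == 13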
| 697 | 1 |
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
"""simple docstring"""
a_ = AutoConfig.from_pretrained(_UpperCAmelCase )
a_ = FlaxAutoModelForSeqaSeqLM.from_config(config=_UpperCAmelCase )
a_ = checkpoints.load_tax_checkpoint(_UpperCAmelCase )
a_ = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
a_ = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
a_ = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
a_ = 'TransientGlobalSelfAttention'
else:
raise ValueError(
'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
' attribute with a value from [\'local\', \'transient-global].' )
# Encoder
for layer_index in range(config.num_layers ):
a_ = F'''layers_{str(_UpperCAmelCase )}'''
# Self-Attention
a_ = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
a_ = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
a_ = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
a_ = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
a_ = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
a_ = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
a_ = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
a_ = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
a_ = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
a_ = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
a_ = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
a_ = flax_model.params['encoder']['block'][str(_UpperCAmelCase )]['layer']
a_ = tax_attention_key
a_ = tax_attention_out
a_ = tax_attention_query
a_ = tax_attention_value
a_ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
a_ = tax_global_layer_norm
if split_mlp_wi:
a_ = tax_mlp_wi_a
a_ = tax_mlp_wi_a
else:
a_ = tax_mlp_wi
a_ = tax_mlp_wo
a_ = tax_mlp_layer_norm
a_ = flax_model_encoder_layer_block
# Only for layer 0:
a_ = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
a_ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
a_ = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
a_ = tax_encoder_global_rel_embedding
# Assigning
a_ = tax_model['target']['encoder']['encoder_norm']['scale']
a_ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
a_ = F'''layers_{str(_UpperCAmelCase )}'''
# Self-Attention
a_ = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
a_ = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
a_ = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
a_ = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
a_ = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
a_ = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
a_ = tax_enc_dec_attention_module['key']['kernel']
a_ = tax_enc_dec_attention_module['out']['kernel']
a_ = tax_enc_dec_attention_module['query']['kernel']
a_ = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
a_ = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
a_ = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
a_ = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
a_ = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
a_ = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
a_ = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
a_ = flax_model.params['decoder']['block'][str(_UpperCAmelCase )]['layer']
a_ = tax_attention_key
a_ = tax_attention_out
a_ = tax_attention_query
a_ = tax_attention_value
a_ = tax_pre_attention_layer_norm
a_ = tax_enc_dec_attention_key
a_ = tax_enc_dec_attention_out
a_ = tax_enc_dec_attention_query
a_ = tax_enc_dec_attention_value
a_ = tax_cross_layer_norm
if split_mlp_wi:
a_ = tax_mlp_wi_a
a_ = tax_mlp_wi_a
else:
a_ = tax_mlp_wi
a_ = tax_mlp_wo
a_ = txa_mlp_layer_norm
a_ = flax_model_decoder_layer_block
# Decoder Normalization
a_ = tax_model['target']['decoder']['decoder_norm']['scale']
a_ = txa_decoder_norm
# Only for layer 0:
a_ = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
a_ = tax_decoder_rel_embedding
# Token Embeddings
a_ = tax_model['target']['token_embedder']['embedding']
a_ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
a_ = tax_model['target']['decoder']['logits_dense']['kernel']
flax_model.save_pretrained(_UpperCAmelCase )
print('T5X Model was successfully converted!' )
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
__lowerCAmelCase =parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
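# --- added note -------------------------------------------------------------
# Hedged example invocation (script name and all paths are placeholders):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path ./t5x/t5_1_1_small/checkpoint_1000000 \
#       --config_name google/t5-v1_1-small \
#       --flax_dump_folder_path ./t5_1_1_small_flax
#
# The 'wi_0' probe near the top of the function distinguishes gated-activation
# checkpoints (two MLP input projections, wi_0/wi_1) from the original T5
# layout (a single wi), which is why both branches appear throughout.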
| 697 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = 10
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = [1, 2, 3, 4]
a_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = ''
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
a_ , a_ = process_story(UpperCAmelCase__ )
a_ = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = ['It was the best of times.']
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = torch.tensor([1, 2, 3, 4] )
a_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = 101
a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ )
np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
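# --- added example ---------------------------------------------------------
# A minimal sketch of the masking rule the three build_mask tests above
# encode: 1 for real tokens, 0 wherever the pad token id appears. This is an
# illustration consistent with those cases, not the actual
# utils_summarization.build_mask implementation.
def build_mask_sketch(sequence: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    return (sequence != pad_token_id).long()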
| 697 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase ={
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =[
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
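# --- added note -------------------------------------------------------------
# What the _LazyModule pattern above buys you, sketched under assumptions (the
# module path below is illustrative): importing the package is cheap, and the
# heavy submodule only loads on first attribute access.
#
#   import transformers.models.m2m_100 as m2m  # fast: nothing heavy imported yet
#   m2m.M2M100Config                           # first access triggers the real import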
| 697 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class _snake_case ( unittest.TestCase , snake_case ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = load_tool('text-question-answering' )
self.tool.setup()
a_ = load_tool('text-question-answering' , remote=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.remote_tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = self.tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = self.remote_tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
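# --- added example ---------------------------------------------------------
# Hedged usage sketch for the tool under test, following the same call pattern
# as the test methods above; the text and question are illustrative stand-ins
# for the module-level fixture string.
def _qa_tool_sketch():
    qa_tool = load_tool('text-question-answering')
    qa_tool.setup()
    return qa_tool(text='Hugging Face was founded in 2016.', question='When was Hugging Face founded?')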
| 697 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
a_ = LxmertConfig.from_json_file(_UpperCAmelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
a_ = LxmertForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , _UpperCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 697 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =[
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowerCAmelCase ={}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "vit"
def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=224 , UpperCAmelCase__=16 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=16 , **UpperCAmelCase__ , ) -> Dict:
super().__init__(**UpperCAmelCase__ )
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = qkv_bias
a_ = encoder_stride
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> float:
return 1e-4
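# --- added example ---------------------------------------------------------
# A quick worked example of the patch arithmetic implied by the defaults above
# (image_size=224, patch_size=16): a ViT encoder sees one token per 16x16
# patch plus the [CLS] token.
image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196
sequence_length = num_patches + 1  # +1 for [CLS] -> 197
assert (num_patches, sequence_length) == (196, 197)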
| 697 | 1 |
'''simple docstring'''
from math import sqrt
def a ( _UpperCAmelCase ) -> bool:
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
a_ = True
# 0 and 1 are not primes.
if number <= 1:
a_ = False
for divisor in range(2 , int(round(sqrt(_UpperCAmelCase ) ) ) + 1 ):
# if 'number' is divisible by 'divisor', set 'status'
# to False and break out of the loop.
if number % divisor == 0:
a_ = False
break
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'status' must been from type bool"
return status
def a ( _UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
a_ = list(range(2 , n + 1 ) )
a_ = [] # this list will be returned.
# actual sieve of Eratosthenes
for i in range(len(_UpperCAmelCase ) ):
for j in range(i + 1 , len(_UpperCAmelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
a_ = 0
# filters actual prime numbers.
a_ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type list"
return ans
def a ( _UpperCAmelCase ) -> List[str]:
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n > 2), "'N' must been an int and > 2"
a_ = []
# iterates over all numbers from 2 up to N
# and appends each prime to the list 'ans'
for number in range(2 , n + 1 ):
if is_prime(_UpperCAmelCase ):
ans.append(_UpperCAmelCase )
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type list"
return ans
def a ( _UpperCAmelCase ) -> List[str]:
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and number >= 0, "'number' must been an int and >= 0"
a_ = [] # this list will be returned by the function.
# potential prime number factors.
a_ = 2
a_ = number
if number == 0 or number == 1:
ans.append(_UpperCAmelCase )
# if 'number' is not prime, build its prime factorization
elif not is_prime(_UpperCAmelCase ):
while quotient != 1:
if is_prime(_UpperCAmelCase ) and (quotient % factor == 0):
ans.append(_UpperCAmelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(_UpperCAmelCase )
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type list"
return ans
def a ( _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
a_ = 0
# prime factorization of 'number'
a_ = prime_factorization(_UpperCAmelCase )
a_ = max(_UpperCAmelCase )
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type int"
return ans
def a ( _UpperCAmelCase ) -> Tuple:
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
a_ = 0
# prime factorization of 'number'
a_ = prime_factorization(_UpperCAmelCase )
a_ = min(_UpperCAmelCase )
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type int"
return ans
def a ( _UpperCAmelCase ) -> Tuple:
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'number' must been an int"
assert isinstance(number % 2 == 0 , _UpperCAmelCase ), "compare bust been from type bool"
return number % 2 == 0
def a ( _UpperCAmelCase ) -> List[str]:
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'number' must been an int"
assert isinstance(number % 2 != 0 , _UpperCAmelCase ), "compare bust been from type bool"
return number % 2 != 0
def a ( _UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (number > 2) and is_even(_UpperCAmelCase )
), "'number' must been an int, even and > 2"
a_ = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
a_ = get_prime_numbers(_UpperCAmelCase )
a_ = len(_UpperCAmelCase )
# run variable for while-loops.
a_ = 0
a_ = None
# exit flag used to break out of the loops
a_ = True
while i < len_pn and loop:
a_ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
a_ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (len(_UpperCAmelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
a_ = 0
while numbera != 0:
a_ = numbera % numbera
a_ = numbera
a_ = rest
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
a_ = 1 # actual answer that will be returned.
# for lcm(x, 1) ("kgV" is German for lcm)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
a_ = prime_factorization(_UpperCAmelCase )
a_ = prime_factorization(_UpperCAmelCase )
elif numbera == 1 or numbera == 1:
a_ = []
a_ = []
a_ = max(_UpperCAmelCase , _UpperCAmelCase )
a_ = 0
a_ = 0
a_ = [] # numbers captured in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
a_ = prime_fac_a.count(_UpperCAmelCase )
a_ = prime_fac_a.count(_UpperCAmelCase )
for _ in range(max(_UpperCAmelCase , _UpperCAmelCase ) ):
ans *= n
else:
a_ = prime_fac_a.count(_UpperCAmelCase )
for _ in range(_UpperCAmelCase ):
ans *= n
done.append(_UpperCAmelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
a_ = prime_fac_a.count(_UpperCAmelCase )
for _ in range(_UpperCAmelCase ):
ans *= n
done.append(_UpperCAmelCase )
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def a ( _UpperCAmelCase ) -> Any:
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 0), "'number' must been a positive int"
a_ = 0
a_ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(_UpperCAmelCase ):
ans += 1
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and is_prime(
_UpperCAmelCase ), "'ans' must been a prime number and from type int"
return ans
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
"""simple docstring"""
assert (
is_prime(_UpperCAmelCase ) and is_prime(_UpperCAmelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
a_ = p_number_a + 1 # jump to the next number
a_ = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_UpperCAmelCase ):
number += 1
while number < p_number_a:
ans.append(_UpperCAmelCase )
number += 1
# fetch the next prime number.
while not is_prime(_UpperCAmelCase ):
number += 1
# precondition
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and ans[0] != p_number_a
and ans[len(_UpperCAmelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def a ( _UpperCAmelCase ) -> Dict:
"""simple docstring"""
    assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 1), "'n' must be an int and >= 1"
a_ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_UpperCAmelCase )
# precondition
    assert ans[0] == 1 and ans[len(_UpperCAmelCase ) - 1] == n, "Error in function get_divisors(...)"
return ans
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
number > 1
), "'number' must been an int and >= 1"
a_ = get_divisors(_UpperCAmelCase )
# precondition
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (divisors[0] == 1)
and (divisors[len(_UpperCAmelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
    # sum all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Any:
"""simple docstring"""
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
a_ = gcd(abs(_UpperCAmelCase ) , abs(_UpperCAmelCase ) )
# precondition
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a ( _UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
    assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 0), "'n' must be an int and >= 0"
    a_ = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a ( _UpperCAmelCase ) -> Dict:
"""simple docstring"""
    assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 0), "'n' must be an int and >= 0"
a_ = 0
a_ = 1
    a_ = 1 # this will be returned
for _ in range(n - 1 ):
a_ = ans
ans += fiba
a_ = tmp
return ans
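# Standalone sketch of the iterative Fibonacci recurrence implemented above
# (indexing assumption inferred from the n - 1 loop: fib(0) == fib(1) == 1):
def fib_reference(n: int) -> int:
    prev, ans = 1, 1
    for _ in range(n - 1):
        prev, ans = ans, ans + prev
    return ans

assert [fib_reference(i) for i in range(5)] == [1, 1, 2, 3, 5]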
| 697 |
'''simple docstring'''
def a ( _UpperCAmelCase = 5_0 ) -> int:
"""simple docstring"""
a_ = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
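# Sanity note for the recurrence above (a Project Euler 114-style count: red
# blocks of length >= 3 separated by at least one black square). For a row of
# length 3 the two arrangements are "all black" and "a single 3-block", and the
# DP agrees, e.g. (name taken from the print above):
# assert solution(3) == 2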
| 697 | 1 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__lowerCAmelCase =""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
class _snake_case ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ = " " ) -> Any:
a_ = sentence_delimiter
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Dict:
return list(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> int:
a_ = []
for sent_idx, sentence in enumerate(UpperCAmelCase__ ):
chars.extend(self.process_string(UpperCAmelCase__ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(UpperCAmelCase__ ) - 1:
chars.append(self.sentence_delimiter )
return chars
__lowerCAmelCase =tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__lowerCAmelCase =tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__lowerCAmelCase ="\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
__lowerCAmelCase ="\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
__lowerCAmelCase ="\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=False ) -> Dict:
if concatenate_texts:
return jiwer.compute_measures(
UpperCAmelCase__ , UpperCAmelCase__ , truth_transform=UpperCAmelCase__ , hypothesis_transform=UpperCAmelCase__ , )["wer"]
a_ = 0
a_ = 0
for prediction, reference in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
a_ = jiwer.compute_measures(
UpperCAmelCase__ , UpperCAmelCase__ , truth_transform=UpperCAmelCase__ , hypothesis_transform=UpperCAmelCase__ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
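# Standalone arithmetic check of the CER formula documented above, mirroring the
# incorrect/total ratio computed in `_compute`: CER = (S + D + I) / (S + D + C).
subs, dels, ins, hits = 1, 1, 2, 6
assert (subs + dels + ins) / (subs + dels + hits) == 0.5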
| 697 |
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
"""simple docstring"""
return int(input_a == input_a == 0 )
def a ( ) -> None:
"""simple docstring"""
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
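# NOR is functionally complete; a minimal standalone sketch deriving NOT and OR
# from the two-input semantics shown in the truth table above:
def nor(a: int, b: int) -> int:
    return int(a == b == 0)

def not_gate(a: int) -> int:
    return nor(a, a)

def or_gate(a: int, b: int) -> int:
    return not_gate(nor(a, b))

assert [or_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 1, 1, 1]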
| 697 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ="▁"
__lowerCAmelCase ={
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
__lowerCAmelCase ={
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
__lowerCAmelCase ={
"facebook/s2t-small-librispeech-asr": 1024,
}
__lowerCAmelCase =["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
__lowerCAmelCase ={"mustc": MUSTC_LANGS}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = MAX_MODEL_INPUT_SIZES
_UpperCamelCase = ["input_ids", "attention_mask"]
_UpperCamelCase = []
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__="<s>" , UpperCAmelCase__="</s>" , UpperCAmelCase__="<pad>" , UpperCAmelCase__="<unk>" , UpperCAmelCase__=False , UpperCAmelCase__=False , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> None:
a_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , do_upper_case=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ , lang_codes=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
a_ = do_upper_case
a_ = do_lower_case
a_ = load_json(UpperCAmelCase__ )
a_ = {v: k for k, v in self.encoder.items()}
a_ = spm_file
a_ = load_spm(UpperCAmelCase__ , self.sp_model_kwargs )
if lang_codes is not None:
a_ = lang_codes
a_ = LANGUAGES[lang_codes]
a_ = [F'''<lang:{lang}>''' for lang in self.langs]
a_ = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs}
a_ = self.lang_tokens
a_ = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
a_ = {}
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return len(self.encoder )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> str:
return self._tgt_lang
@tgt_lang.setter
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> None:
a_ = new_tgt_lang
self.set_tgt_lang_special_tokens(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> None:
a_ = self.lang_code_to_id[tgt_lang]
a_ = [lang_code_id]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> List[str]:
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Tuple:
return self.encoder.get(UpperCAmelCase__ , self.encoder[self.unk_token] )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> str:
return self.decoder.get(UpperCAmelCase__ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> str:
a_ = []
a_ = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
a_ = self.sp_model.decode(UpperCAmelCase__ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
a_ = []
else:
current_sub_tokens.append(UpperCAmelCase__ )
a_ = self.sp_model.decode(UpperCAmelCase__ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
a_ = [1] * len(self.prefix_tokens )
a_ = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCAmelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCAmelCase__ )) + ([0] * len(UpperCAmelCase__ )) + suffix_ones
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
a_ = self.__dict__.copy()
a_ = None
return state
def __setstate__( self , UpperCAmelCase__ ) -> None:
a_ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
a_ = {}
a_ = load_spm(self.spm_file , self.sp_model_kwargs )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Tuple[str]:
a_ = Path(UpperCAmelCase__ )
assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
a_ = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
a_ = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , UpperCAmelCase__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , UpperCAmelCase__ )
elif not os.path.isfile(self.spm_file ):
with open(UpperCAmelCase__ , 'wb' ) as fi:
a_ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (str(UpperCAmelCase__ ), str(UpperCAmelCase__ ))
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> sentencepiece.SentencePieceProcessor:
"""simple docstring"""
a_ = sentencepiece.SentencePieceProcessor(**_UpperCAmelCase )
spm.Load(str(_UpperCAmelCase ) )
return spm
def a ( _UpperCAmelCase ) -> Union[Dict, List]:
"""simple docstring"""
with open(_UpperCAmelCase , 'r' ) as f:
return json.load(_UpperCAmelCase )
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> None:
"""simple docstring"""
with open(_UpperCAmelCase , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase , indent=2 )
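# Minimal round-trip sketch of the save_json/load_json helpers above
# (standalone: uses the json module directly with a temporary path):
import json
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp_dir:
    vocab_path = os.path.join(tmp_dir, "vocab.json")
    with open(vocab_path, "w") as f:
        json.dump({"<s>": 0, "</s>": 2}, f, indent=2)
    with open(vocab_path) as f:
        assert json.load(f) == {"<s>": 0, "</s>": 2}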
| 697 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class _snake_case ( snake_case ):
"""simple docstring"""
@staticmethod
def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> str:
a_ = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=UpperCAmelCase__ , help='Name of the model to download' )
download_parser.set_defaults(func=UpperCAmelCase__ )
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = model
a_ = cache
a_ = force
a_ = trust_remote_code
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
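# Illustrative invocation (assumes the standard `transformers-cli` entry point
# that registers this subcommand; the flags match the parser defined above):
#   transformers-cli download --cache-dir /tmp/models --force bert-base-uncased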
| 697 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase ={
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =[
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =[
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
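# Minimal standalone sketch of the lazy-import pattern used above: a PEP 562
# module-level __getattr__ defers the heavy import until a name is first accessed.
import importlib

def make_lazy_getattr(module_map):
    def __getattr__(name):
        if name in module_map:
            module = importlib.import_module(module_map[name])
            return getattr(module, name)
        raise AttributeError(name)
    return __getattr__

# e.g. __getattr__ = make_lazy_getattr({"TransfoXLConfig": "transformers"})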
| 697 |
'''simple docstring'''
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
assert column_title.isupper()
a_ = 0
a_ = len(_UpperCAmelCase ) - 1
a_ = 0
while index >= 0:
        a_ = (ord(column_title[index] ) - 6_4) * pow(2_6 , power )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
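# Worked example of the base-26 conversion above: for "AB",
# (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1 = 2 + 26 = 28.
assert (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1 == 28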
| 697 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCAmelCase =logging.get_logger(__name__)
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = ["pixel_values"]
def __init__( self , UpperCAmelCase__ = True , UpperCAmelCase__ = 32 , UpperCAmelCase__=PILImageResampling.BILINEAR , UpperCAmelCase__ = True , **UpperCAmelCase__ , ) -> None:
a_ = do_resize
a_ = do_rescale
a_ = size_divisor
a_ = resample
super().__init__(**UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ ) -> np.ndarray:
a_ , a_ = get_image_size(UpperCAmelCase__ )
# Rounds the height and width down to the closest multiple of size_divisor
a_ = height // size_divisor * size_divisor
a_ = width // size_divisor * size_divisor
a_ = resize(UpperCAmelCase__ , (new_h, new_w) , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
return image
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ ) -> np.ndarray:
return rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__=None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = ChannelDimension.FIRST , **UpperCAmelCase__ , ) -> BatchFeature:
a_ = do_resize if do_resize is not None else self.do_resize
a_ = do_rescale if do_rescale is not None else self.do_rescale
a_ = size_divisor if size_divisor is not None else self.size_divisor
a_ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a_ = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a_ = [to_numpy_array(UpperCAmelCase__ ) for img in images]
if do_resize:
a_ = [self.resize(UpperCAmelCase__ , size_divisor=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_rescale:
a_ = [self.rescale(UpperCAmelCase__ , scale=1 / 255 ) for image in images]
a_ = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
a_ = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
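# Standalone sketch of the size_divisor rounding performed in `resize` above:
# height and width are floored to the nearest multiple of size_divisor.
def floor_to_multiple(x: int, divisor: int) -> int:
    return x // divisor * divisor

assert floor_to_multiple(65, 32) == 64
assert floor_to_multiple(97, 32) == 96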
| 697 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCAmelCase =logging.get_logger(__name__)
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = ["audio_values", "audio_mask"]
def __init__( self , UpperCAmelCase__=2048 , UpperCAmelCase__=1 , UpperCAmelCase__=[16, 16] , UpperCAmelCase__=128 , UpperCAmelCase__=4_4100 , UpperCAmelCase__=86 , UpperCAmelCase__=2048 , UpperCAmelCase__=0.0 , **UpperCAmelCase__ , ) -> Union[str, Any]:
super().__init__(
feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = spectrogram_length
a_ = num_channels
a_ = patch_size
a_ = feature_size // self.patch_size[1]
a_ = n_fft
a_ = sampling_rate // hop_length_to_sampling_rate
a_ = sampling_rate
a_ = padding_value
a_ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCAmelCase__ , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=UpperCAmelCase__ , norm='slaney' , mel_scale='slaney' , ).T
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> np.ndarray:
a_ = spectrogram(
UpperCAmelCase__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
a_ = log_spec[:, :-1]
a_ = log_spec - 2_0.0
a_ = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , **UpperCAmelCase__ , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
a_ = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
a_ = is_batched_numpy or (
isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a_ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ):
a_ = np.asarray(UpperCAmelCase__ , dtype=np.floataa )
elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a_ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
a_ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , UpperCAmelCase__ ):
a_ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
a_ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
a_ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
a_ = np.array(UpperCAmelCase__ ).astype(np.floataa )
# convert into correct format for padding
a_ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
a_ = np.ones([len(UpperCAmelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
a_ = padded_audio_features * self.padding_value
for i in range(len(UpperCAmelCase__ ) ):
a_ = audio_features[i]
a_ = feature
# return as BatchFeature
if return_attention_mask:
a_ = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
a_ = {'audio_values': padded_audio_features}
a_ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
return encoded_inputs
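import numpy as np

# Standalone sketch of the dB normalization in `_np_extract_fbank_features` above:
# after the -20 dB shift, values in [-80, 0] dB map linearly onto [-1, 1].
def normalize_db(log_spec: np.ndarray) -> np.ndarray:
    shifted = log_spec - 20.0
    return np.clip(shifted / 40.0, -2.0, 0.0) + 1.0

assert normalize_db(np.array([20.0]))[0] == 1.0  # 0 dB after the shift -> 1.0
assert normalize_db(np.array([-60.0]))[0] == -1.0  # -80 dB after the shift -> -1.0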
| 697 | 1 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
a_ = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(UpperCAmelCase__ )
a_ = -1
a_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
a_ = model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ )
a_ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
a_ = TextStreamer(UpperCAmelCase__ )
model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ , streamer=UpperCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
a_ = cs.out[:-1]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
a_ = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(UpperCAmelCase__ )
a_ = -1
a_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
a_ = model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ )
a_ = tokenizer.decode(greedy_ids[0] )
a_ = TextIteratorStreamer(UpperCAmelCase__ )
a_ = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
a_ = Thread(target=model.generate , kwargs=UpperCAmelCase__ )
thread.start()
a_ = ''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
a_ = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(UpperCAmelCase__ )
a_ = -1
a_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
a_ = model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ )
a_ = greedy_ids[:, input_ids.shape[1] :]
a_ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
a_ = TextStreamer(UpperCAmelCase__ , skip_prompt=UpperCAmelCase__ )
model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ , streamer=UpperCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
a_ = cs.out[:-1]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
a_ = AutoTokenizer.from_pretrained('distilgpt2' )
a_ = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(UpperCAmelCase__ )
a_ = -1
a_ = torch.ones((1, 5) , device=UpperCAmelCase__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
a_ = TextStreamer(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
model.generate(UpperCAmelCase__ , max_new_tokens=1 , do_sample=UpperCAmelCase__ , streamer=UpperCAmelCase__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
a_ = cs.out[:-1] # Remove the final "\n"
a_ = tokenizer(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
a_ = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(UpperCAmelCase__ )
a_ = -1
a_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
a_ = TextIteratorStreamer(UpperCAmelCase__ , timeout=0.0_0_1 )
a_ = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
a_ = Thread(target=model.generate , kwargs=UpperCAmelCase__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCAmelCase__ ):
a_ = ''
for new_text in streamer:
streamer_text += new_text
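# The consumption pattern exercised above, in isolation (model/tokenizer assumed):
#   streamer = TextIteratorStreamer(tokenizer)
#   Thread(target=model.generate, kwargs={**inputs, "streamer": streamer}).start()
#   for new_text in streamer:  # blocks until generate() pushes text; raises Empty on timeout
#       print(new_text, end="")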
| 697 |
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Optional[Any]:
"""simple docstring"""
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
a_ = len(set_a.intersection(_UpperCAmelCase ) )
if alternative_union:
a_ = len(_UpperCAmelCase ) + len(_UpperCAmelCase )
else:
a_ = len(set_a.union(_UpperCAmelCase ) )
return intersection / union
if isinstance(_UpperCAmelCase , (list, tuple) ) and isinstance(_UpperCAmelCase , (list, tuple) ):
a_ = [element for element in set_a if element in set_b]
if alternative_union:
a_ = len(_UpperCAmelCase ) + len(_UpperCAmelCase )
return len(_UpperCAmelCase ) / union
else:
a_ = set_a + [element for element in set_b if element not in set_a]
return len(_UpperCAmelCase ) / len(_UpperCAmelCase )
return None
if __name__ == "__main__":
__lowerCAmelCase ={"a", "b", "c", "d", "e"}
__lowerCAmelCase ={"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 697 | 1 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = "x" , _UpperCAmelCase = 1_0**-1_0 , _UpperCAmelCase = 1 , ) -> complex:
"""simple docstring"""
a_ = symbols(_UpperCAmelCase )
a_ = lambdify(_UpperCAmelCase , _UpperCAmelCase )
a_ = lambdify(_UpperCAmelCase , diff(_UpperCAmelCase , _UpperCAmelCase ) )
a_ = starting_point
while True:
if diff_function(_UpperCAmelCase ) != 0:
a_ = prev_guess - multiplicity * func(_UpperCAmelCase ) / diff_function(
_UpperCAmelCase )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
a_ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}''')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
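# Plain-Python sketch of the damped Newton step used above (the `multiplicity`
# factor restores quadratic convergence at repeated roots):
def newton_step(x, f, f_prime, multiplicity=1):
    return x - multiplicity * f(x) / f_prime(x)

# e.g. approximating sqrt(2) via f(x) = x**2 - 2:
x = 1.0
for _ in range(6):
    x = newton_step(x, lambda t: t * t - 2, lambda t: 2 * t)
assert abs(x - 2**0.5) < 1e-10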
| 697 |
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
raise ValueError(
            'starting number must be an integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
a_ = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
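# Standalone reference for the rules above (note the loop treats `iterations`
# as an inclusive upper bound; the function name here is illustrative):
def fizz_buzz_reference(number: int, iterations: int) -> str:
    out = ""
    for n in range(number, iterations + 1):
        token = ("Fizz" if n % 3 == 0 else "") + ("Buzz" if n % 5 == 0 else "")
        out += (token or str(n)) + " "
    return out

assert fizz_buzz_reference(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "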
| 697 | 1 |
'''simple docstring'''
def a ( _UpperCAmelCase ) -> str:
"""simple docstring"""
return "".join(chr(ord(_UpperCAmelCase ) - 3_2 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
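# Quick standalone check of the ASCII arithmetic above (ord("a") - ord("A") == 32):
assert "".join(chr(ord(c) - 32) if "a" <= c <= "z" else c for c in "hello World") == "HELLO WORLD"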
| 697 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _snake_case ( snake_case ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> str:
super().__init__(
features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ , streaming=UpperCAmelCase__ , num_proc=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = Generator(
cache_dir=UpperCAmelCase__ , features=UpperCAmelCase__ , generator=UpperCAmelCase__ , gen_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ , )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
# Build iterable dataset
if self.streaming:
a_ = self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
a_ = None
a_ = None
a_ = None
a_ = None
self.builder.download_and_prepare(
download_config=UpperCAmelCase__ , download_mode=UpperCAmelCase__ , verification_mode=UpperCAmelCase__ , base_path=UpperCAmelCase__ , num_proc=self.num_proc , )
a_ = self.builder.as_dataset(
split='train' , verification_mode=UpperCAmelCase__ , in_memory=self.keep_in_memory )
return dataset
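# Illustrative use of the generator-backed reader above (the public datasets API
# exposes it via Dataset.from_generator; shown here as an assumption):
# from datasets import Dataset
# def gen():
#     yield {"text": "hello"}
#     yield {"text": "world"}
# ds = Dataset.from_generator(gen)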
| 697 | 1 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__lowerCAmelCase =1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=16 , UpperCAmelCase__=13 , UpperCAmelCase__=7 , UpperCAmelCase__=14 , UpperCAmelCase__=10 , UpperCAmelCase__=19 , UpperCAmelCase__=5 , UpperCAmelCase__=4 , UpperCAmelCase__=True , UpperCAmelCase__=16 , UpperCAmelCase__=2 , UpperCAmelCase__=4 , UpperCAmelCase__=4 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=[1, 2, 3, 4, 5] , UpperCAmelCase__=25 , UpperCAmelCase__=5 , ) -> List[Any]:
a_ = d_model
a_ = parent
a_ = batch_size
a_ = prediction_length
a_ = context_length
a_ = cardinality
a_ = num_time_features
a_ = lags_sequence
a_ = embedding_dimension
a_ = is_training
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = context_length
a_ = prediction_length + label_length
a_ = label_length
a_ = moving_average
a_ = autocorrelation_factor
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> List[str]:
a_ = config.context_length + max(config.lags_sequence )
a_ = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
a_ = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
a_ = floats_tensor([self.batch_size, _past_length] )
a_ = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
a_ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
a_ = floats_tensor([self.batch_size, config.prediction_length] )
a_ = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.get_config()
a_ = self.prepare_autoformer_inputs_dict(UpperCAmelCase__ )
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ , a_ = self.prepare_config_and_inputs()
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = AutoformerModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).eval()
a_ = model(**UpperCAmelCase__ )
a_ = outputs.encoder_last_hidden_state
a_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
a_ = model.get_encoder()
encoder.save_pretrained(UpperCAmelCase__ )
a_ = AutoformerEncoder.from_pretrained(UpperCAmelCase__ ).to(UpperCAmelCase__ )
a_ , a_ , a_ , a_ , a_ = model.create_network_inputs(**UpperCAmelCase__ )
a_ , a_ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
a_ = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
a_ = encoder(inputs_embeds=UpperCAmelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
a_ = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
a_ = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
a_ = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
a_ = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
a_ = model.get_decoder()
decoder.save_pretrained(UpperCAmelCase__ )
a_ = AutoformerDecoder.from_pretrained(UpperCAmelCase__ ).to(UpperCAmelCase__ )
a_ = decoder(
trend=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_UpperCamelCase = (AutoformerForPrediction,) if is_torch_available() else ()
_UpperCamelCase = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = AutoformerModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ , a_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase__ )
a_ , a_ = model_class.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
self.assertEqual(info['missing_keys'] , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase__ )
@unittest.skip(reason='Model has no tokens embeddings' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = inspect.signature(getattr(UpperCAmelCase__ , 'forward' ) )
# The main input is the name of the argument after `self`
a_ = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask' )
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
] )
self.assertListEqual(arg_names[: len(UpperCAmelCase__ )] , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
a_ = getattr(self.model_tester , 'seq_length' , UpperCAmelCase__ )
a_ = getattr(self.model_tester , 'decoder_seq_length' , UpperCAmelCase__ )
a_ = getattr(self.model_tester , 'encoder_seq_length' , UpperCAmelCase__ )
a_ = getattr(self.model_tester , 'd_model' , UpperCAmelCase__ )
a_ = getattr(self.model_tester , 'num_attention_heads' , UpperCAmelCase__ )
a_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
a_ = True
a_ = False
a_ = True
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
a_ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
a_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a_ = True
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
a_ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
a_ = outputs.encoder_attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
a_ = len(UpperCAmelCase__ )
a_ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# decoder attentions
a_ = outputs.decoder_attentions
self.assertIsInstance(UpperCAmelCase__ , (list, tuple) )
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
a_ = outputs.cross_attentions
self.assertIsInstance(UpperCAmelCase__ , (list, tuple) )
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
a_ = True
a_ = True
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
a_ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCAmelCase__ ) )
a_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
super().test_retain_grad_hidden_states_attentions()
def a ( _UpperCAmelCase="train-batch.pt" ) -> List[Any]:
"""simple docstring"""
a_ = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' , filename=_UpperCAmelCase , repo_type='dataset' )
a_ = torch.load(_UpperCAmelCase , map_location=_UpperCAmelCase )
return batch
@require_torch
@slow
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(UpperCAmelCase__ )
a_ = prepare_batch()
with torch.no_grad():
a_ = model(
past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , future_values=batch['future_values'] , future_time_features=batch['future_time_features'] , )[0]
a_ = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCAmelCase__ )
a_ = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=UpperCAmelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(UpperCAmelCase__ )
a_ = prepare_batch('val-batch.pt' )
with torch.no_grad():
a_ = model(
past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , ).encoder_last_hidden_state
a_ = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCAmelCase__ )
a_ = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=UpperCAmelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(UpperCAmelCase__ )
a_ = prepare_batch('val-batch.pt' )
with torch.no_grad():
a_ = model.generate(
static_categorical_features=batch['static_categorical_features'] , past_time_features=batch['past_time_features'] , past_values=batch['past_values'] , future_time_features=batch['future_time_features'] , past_observed_mask=batch['past_observed_mask'] , )
a_ = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCAmelCase__ )
a_ = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=UpperCAmelCase__ )
a_ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCAmelCase__ , rtol=1e-1 ) )
| 697 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowerCAmelCase =numpy.array([0, 0])
__lowerCAmelCase =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase =numpy.array([1, 0])
__lowerCAmelCase =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> list[numpy.ndarray]:
"""simple docstring"""
a_ = initial_vectors
for _ in range(_UpperCAmelCase ):
a_ = iteration_step(_UpperCAmelCase )
return vectors
def a ( _UpperCAmelCase ) -> list[numpy.ndarray]:
"""simple docstring"""
a_ = []
for i, start_vector in enumerate(vectors[:-1] ):
a_ = vectors[i + 1]
new_vectors.append(_UpperCAmelCase )
a_ = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> numpy.ndarray:
"""simple docstring"""
a_ = numpy.radians(_UpperCAmelCase )
a_ , a_ = numpy.cos(_UpperCAmelCase ), numpy.sin(_UpperCAmelCase )
a_ = numpy.array(((c, -s), (s, c)) )
return numpy.dot(_UpperCAmelCase , _UpperCAmelCase )
def a ( _UpperCAmelCase ) -> None:
"""simple docstring"""
a_ = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
a_ , a_ = zip(*_UpperCAmelCase )
plt.plot(_UpperCAmelCase , _UpperCAmelCase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
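import numpy
# Worked check of the 2D rotation used above: rotating the unit x-vector by
# 90 degrees yields the unit y-vector (up to floating-point error).
theta = numpy.radians(90)
c, s = numpy.cos(theta), numpy.sin(theta)
assert numpy.allclose(numpy.dot(numpy.array(((c, -s), (s, c))), numpy.array([1, 0])), [0, 1])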
| 697 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = BioGptTokenizer
_UpperCamelCase = False
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
a_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
a_ = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(UpperCAmelCase__ ) )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[Any]:
a_ = 'lower newer'
a_ = 'lower newer'
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = BioGptTokenizer(self.vocab_file , self.merges_file )
a_ = 'lower'
a_ = ['low', 'er</w>']
a_ = tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = tokens + ['<unk>']
a_ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
a_ = tokenizer.encode('sequence builders' , add_special_tokens=UpperCAmelCase__ )
a_ = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCAmelCase__ )
a_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
a_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
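# Why the test above expects ["low", "er</w>"]: BPE applies the ranked merges
# "l o" -> "lo", "lo w" -> "low" and "e r</w>" -> "er</w>" to the character
# split of "lower", and no learned merge joins "low" with "er</w>".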
| 697 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
def a ( _UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
for char in word:
a_ = ord(_UpperCAmelCase )
if not _is_chinese_char(_UpperCAmelCase ):
return 0
return 1
def a ( _UpperCAmelCase ) -> Tuple:
"""simple docstring"""
a_ = set()
for token in tokens:
a_ = len(_UpperCAmelCase ) > 1 and is_chinese(_UpperCAmelCase )
if chinese_word:
word_set.add(_UpperCAmelCase )
a_ = list(_UpperCAmelCase )
return word_list
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
a_ = max([len(_UpperCAmelCase ) for w in chinese_word_set] )
a_ = bert_tokens
a_ , a_ = 0, len(_UpperCAmelCase )
while start < end:
a_ = True
if is_chinese(bert_word[start] ):
a_ = min(end - start , _UpperCAmelCase )
for i in range(_UpperCAmelCase , 1 , -1 ):
a_ = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
a_ = '##' + bert_word[j]
a_ = start + i
a_ = False
break
if single_word:
start += 1
return bert_word
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
"""simple docstring"""
a_ = []
for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
a_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['cws'] ).cws
a_ = [get_chinese_word(_UpperCAmelCase ) for r in res]
ltp_res.extend(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
a_ = []
for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
a_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=5_1_2 )
bert_res.extend(res['input_ids'] )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
a_ = []
for input_ids, chinese_word in zip(_UpperCAmelCase , _UpperCAmelCase ):
a_ = []
for id in input_ids:
a_ = bert_tokenizer._convert_id_to_token(_UpperCAmelCase )
input_tokens.append(_UpperCAmelCase )
a_ = add_sub_symbol(_UpperCAmelCase , _UpperCAmelCase )
a_ = []
        # We only save positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(_UpperCAmelCase ):
if token[:2] == "##":
a_ = token[2:]
                # save Chinese tokens' positions
if len(_UpperCAmelCase ) == 1 and _is_chinese_char(ord(_UpperCAmelCase ) ):
ref_id.append(_UpperCAmelCase )
ref_ids.append(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
return ref_ids
def a ( _UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
a_ = f.readlines()
a_ = [line.strip() for line in data if len(_UpperCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
a_ = LTP(args.ltp ) # faster in GPU device
a_ = BertTokenizer.from_pretrained(args.bert )
a_ = prepare_ref(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
a_ = [json.dumps(_UpperCAmelCase ) + '\n' for ref in ref_ids]
f.writelines(_UpperCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
__lowerCAmelCase =parser.parse_args()
main(args)
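# --- Illustrative sketch (not part of the script above; helper name and toy inputs are ours) ---
# A self-contained miniature of what the script computes: given BERT subword
# tokens and the whole words found by a segmenter, every non-initial piece of a
# whole word gets a '##' prefix, and the positions of those pieces become the
# whole-word-masking reference ids.
def _mark_sub_symbols(tokens, whole_words):
    marked = list(tokens)
    longest = max(map(len, whole_words), default=0)
    start = 0
    while start < len(marked):
        matched = False
        # try the longest candidate span first, down to length 2
        for i in range(min(len(marked) - start, longest), 1, -1):
            if ''.join(marked[start : start + i]) in whole_words:
                for j in range(start + 1, start + i):
                    marked[j] = '##' + marked[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return marked

demo_tokens = ['[CLS]', '我', '喜', '欢', '你', '[SEP]']
demo_marked = _mark_sub_symbols(demo_tokens, {'喜欢'})
print(demo_marked)  # ['[CLS]', '我', '喜', '##欢', '你', '[SEP]']
print([i for i, t in enumerate(demo_marked) if t.startswith('##')])  # [3]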
| 697 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__lowerCAmelCase ="\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
__lowerCAmelCase ="\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
__lowerCAmelCase ="\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=False ) -> Tuple:
if return_pvalue:
a_ = pearsonr(UpperCAmelCase__ , UpperCAmelCase__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCAmelCase__ , UpperCAmelCase__ )[0] )}
| 697 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase ={
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__lowerCAmelCase ={
"yjernite/retribert-base-uncased": 512,
}
__lowerCAmelCase ={
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase = RetriBertTokenizer
_UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__="[UNK]" , UpperCAmelCase__="[SEP]" , UpperCAmelCase__="[PAD]" , UpperCAmelCase__="[CLS]" , UpperCAmelCase__="[MASK]" , UpperCAmelCase__=True , UpperCAmelCase__=None , **UpperCAmelCase__ , ) -> int:
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCAmelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase__ ) != tokenize_chinese_chars
):
a_ = getattr(UpperCAmelCase__ , normalizer_state.pop('type' ) )
a_ = do_lower_case
a_ = strip_accents
a_ = tokenize_chinese_chars
a_ = normalizer_class(**UpperCAmelCase__ )
a_ = do_lower_case
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=None ) -> str:
a_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]:
a_ = [self.sep_token_id]
a_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Tuple[str]:
a_ = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
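# --- Illustrative sketch (not part of the tokenizer above; ids below are made up,
# with 101/102 standing in for the usual BERT [CLS]/[SEP]) ---
# The layout the two methods above produce: a pair becomes [CLS] A [SEP] B [SEP],
# and token_type_ids mark the second segment with 1s.
_cls, _sep = 101, 102
_ids_a = [7592, 2088]  # hypothetical ids for sentence A
_ids_b = [2129, 2024]  # hypothetical ids for sentence B
print([_cls] + _ids_a + [_sep])                           # [101, 7592, 2088, 102]
print([_cls] + _ids_a + [_sep] + _ids_b + [_sep])         # [101, 7592, 2088, 102, 2129, 2024, 102]
print([0] * (len(_ids_a) + 2) + [1] * (len(_ids_b) + 1))  # [0, 0, 0, 0, 1, 1, 1]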
| 697 | 1 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__lowerCAmelCase =HUGGINGFACE_HUB_CACHE
__lowerCAmelCase ="config.json"
__lowerCAmelCase ="diffusion_pytorch_model.bin"
__lowerCAmelCase ="diffusion_flax_model.msgpack"
__lowerCAmelCase ="model.onnx"
__lowerCAmelCase ="diffusion_pytorch_model.safetensors"
__lowerCAmelCase ="weights.pb"
__lowerCAmelCase ="https://huggingface.co"
__lowerCAmelCase =default_cache_path
__lowerCAmelCase ="diffusers_modules"
__lowerCAmelCase =os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
__lowerCAmelCase =["fp16", "non-ema"]
__lowerCAmelCase =".self_attn"
| 697 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "lilt"
def __init__( self , UpperCAmelCase__=3_0522 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=0 , UpperCAmelCase__="absolute" , UpperCAmelCase__=None , UpperCAmelCase__=4 , UpperCAmelCase__=1024 , **UpperCAmelCase__ , ) -> Optional[Any]:
super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = classifier_dropout
a_ = channel_shrink_ratio
a_ = max_ad_position_embeddings
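# --- Illustrative usage (assumes a transformers release that exports LiltConfig) ---
# The configuration behaves like any other PretrainedConfig: defaults can be
# overridden selectively at construction time. The override values are arbitrary.
from transformers import LiltConfig

_config = LiltConfig(hidden_size=384, num_hidden_layers=6)
print(_config.model_type)                              # lilt
print(_config.hidden_size, _config.num_hidden_layers)  # 384 6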
| 697 | 1 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
super().__init__()
a_ = nn.Linear(3 , 4 )
a_ = nn.BatchNormad(4 )
a_ = nn.Linear(4 , 5 )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[Any]:
return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase__ ) ) )
class _snake_case ( snake_case ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> str:
return (args[0] + 1,) + args[1:], kwargs
class _snake_case ( snake_case ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
return output + 1
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = ModelForTest()
a_ = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(test_model._hf_hook , UpperCAmelCase__ )
self.assertTrue(hasattr(UpperCAmelCase__ , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , '_hf_hook' ) )
self.assertFalse(hasattr(UpperCAmelCase__ , '_old_forward' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = ModelForTest()
a_ = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ , append=UpperCAmelCase__ )
self.assertEqual(isinstance(test_model._hf_hook , UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(UpperCAmelCase__ , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , '_hf_hook' ) )
self.assertFalse(hasattr(UpperCAmelCase__ , '_old_forward' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = ModelForTest()
a_ = torch.randn(2 , 3 )
a_ = test_model(x + 1 )
a_ = test_model(x + 2 )
a_ = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
a_ = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
a_ = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = ModelForTest()
a_ = torch.randn(2 , 3 )
a_ = test_model(UpperCAmelCase__ )
a_ = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1e-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
a_ = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
a_ = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , output + 2 , atol=1e-5 )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = ModelForTest()
a_ = torch.randn(2 , 3 )
a_ = test_model(UpperCAmelCase__ )
a_ = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
a_ = True
a_ = test_model(UpperCAmelCase__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
a_ = torch.randn(2 , 3 )
a_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(UpperCAmelCase__ , AlignDevicesHook(io_same_device=UpperCAmelCase__ ) )
a_ = torch.randn(2 , 3 ).to(0 )
a_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(0 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
a_ = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
a_ = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
a_ = torch.randn(2 , 3 )
a_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
a_ = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
a_ = torch.randn(2 , 3 )
a_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
a_ = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
a_ = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
a_ = torch.randn(2 , 3 )
a_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , offload_buffers=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
a_ = torch.randn(2 , 3 )
a_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
a_ = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
a_ = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
a_ = torch.randn(2 , 3 )
a_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() , offload_buffers=UpperCAmelCase__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
a_ = torch.randn(2 , 3 )
a_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
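# --- Illustrative sketch (plain Python, not accelerate's real implementation; names are ours) ---
# The mechanics these tests exercise, reduced to a toy: wrapping a callable so a
# hook can rewrite the inputs before the call runs, mirroring how the
# PreForwardHook test above expects model(x + 1) to equal hooked_model(x).
class _ToyPreHook:
    def pre_forward(self, args):
        return tuple(a + 1 for a in args)

def _attach(fn, hook):
    def wrapped(*args):
        return fn(*hook.pre_forward(args))
    return wrapped

_double = lambda x: 2 * x
_hooked = _attach(_double, _ToyPreHook())
print(_double(3), _hooked(3))  # 6 8 -> the hook shifted the input by one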
| 697 |
'''simple docstring'''
from __future__ import annotations
def a ( _UpperCAmelCase ) -> bool:
"""simple docstring"""
a_ = len(_UpperCAmelCase )
    # Create a solution matrix to record the path taken through the maze.
a_ = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )]
a_ = run_maze(_UpperCAmelCase , 0 , 0 , _UpperCAmelCase )
if solved:
print('\n'.join(str(_UpperCAmelCase ) for row in solutions ) )
else:
print('No solution exists!' )
return solved
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> bool:
"""simple docstring"""
a_ = len(_UpperCAmelCase )
# Final check point.
if i == j == (size - 1):
a_ = 1
return True
a_ = (not i < 0) and (not j < 0) # Check lower bounds
a_ = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
        # Check that the cell is neither already visited nor blocked.
a_ = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
            # mark the current cell as visited
            a_ = 1
            # explore the four neighbouring directions (down, right, up, left)
if (
run_maze(_UpperCAmelCase , i + 1 , _UpperCAmelCase , _UpperCAmelCase )
or run_maze(_UpperCAmelCase , _UpperCAmelCase , j + 1 , _UpperCAmelCase )
or run_maze(_UpperCAmelCase , i - 1 , _UpperCAmelCase , _UpperCAmelCase )
or run_maze(_UpperCAmelCase , _UpperCAmelCase , j - 1 , _UpperCAmelCase )
):
return True
a_ = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
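# --- Illustrative sketch (not part of the solver above; names are ours) ---
# A self-contained miniature of the same depth-first backtracking idea: from
# each free cell try down, right, up, left, mark cells on the current path,
# and unmark on dead ends. 0 = free, 1 = wall.
def _solve(maze, i=0, j=0, path=None):
    n = len(maze)
    if path is None:
        path = [[0] * n for _ in range(n)]
    if not (0 <= i < n and 0 <= j < n) or maze[i][j] or path[i][j]:
        return None
    path[i][j] = 1
    if i == j == n - 1:
        return path
    for di, dj in ((1, 0), (0, 1), (-1, 0), (0, -1)):
        if _solve(maze, i + di, j + dj, path) is not None:
            return path
    path[i][j] = 0  # backtrack
    return None

print(_solve([[0, 1, 0], [0, 1, 0], [0, 0, 0]]))
# [[1, 0, 0], [1, 0, 0], [1, 1, 1]] -> down the first column, along the last row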
| 697 | 1 |
'''simple docstring'''
import math
def a ( _UpperCAmelCase ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( _UpperCAmelCase = 1_0_0_0_1 ) -> int:
"""simple docstring"""
try:
a_ = int(_UpperCAmelCase )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
a_ = []
a_ = 2
while len(_UpperCAmelCase ) < nth:
if is_prime(_UpperCAmelCase ):
primes.append(_UpperCAmelCase )
num += 1
else:
num += 1
return primes[len(_UpperCAmelCase ) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
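# --- Illustrative check (standard library only; names are ours) ---
# Why the loop above may step by 6: past 2 and 3, every prime has the form
# 6k - 1 or 6k + 1, and trial division only needs candidates up to sqrt(n).
# The test below restates the same logic and lists the primes under 30.
import math as _math

def _is_prime(n):
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    for i in range(5, int(_math.sqrt(n)) + 1, 6):
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True

print([n for n in range(2, 30) if _is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]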
| 697 |
'''simple docstring'''
__lowerCAmelCase ={
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
__lowerCAmelCase ={
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> float:
"""simple docstring"""
a_ = from_type.lower().strip('s' )
a_ = to_type.lower().strip('s' )
a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase )
a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase )
if from_sanitized not in METRIC_CONVERSION:
a_ = (
F'''Invalid \'from_type\' value: {from_type!r}.\n'''
F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}'''
)
raise ValueError(_UpperCAmelCase )
if to_sanitized not in METRIC_CONVERSION:
a_ = (
F'''Invalid \'to_type\' value: {to_type!r}.\n'''
F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}'''
)
raise ValueError(_UpperCAmelCase )
a_ = METRIC_CONVERSION[from_sanitized]
a_ = METRIC_CONVERSION[to_sanitized]
a_ = 1
if from_exponent > to_exponent:
a_ = from_exponent - to_exponent
else:
a_ = -(to_exponent - from_exponent)
return value * pow(1_0 , _UpperCAmelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
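# --- Worked example of the exponent arithmetic above ---
# Going from kilometres (10^3 m) to megametres (10^6 m) multiplies the value
# by 10^(3 - 6) = 10^-3, so 4 km = 0.004 Mm. Plain Python, no table lookup.
_from_exp, _to_exp = 3, 6  # km, Mm
print(4 * 10 ** (_from_exp - _to_exp))  # 0.004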
| 697 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
def a ( _UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
for char in word:
a_ = ord(_UpperCAmelCase )
if not _is_chinese_char(_UpperCAmelCase ):
return 0
return 1
def a ( _UpperCAmelCase ) -> Tuple:
"""simple docstring"""
a_ = set()
for token in tokens:
a_ = len(_UpperCAmelCase ) > 1 and is_chinese(_UpperCAmelCase )
if chinese_word:
word_set.add(_UpperCAmelCase )
a_ = list(_UpperCAmelCase )
return word_list
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
a_ = max([len(_UpperCAmelCase ) for w in chinese_word_set] )
a_ = bert_tokens
a_ , a_ = 0, len(_UpperCAmelCase )
while start < end:
a_ = True
if is_chinese(bert_word[start] ):
a_ = min(end - start , _UpperCAmelCase )
for i in range(_UpperCAmelCase , 1 , -1 ):
a_ = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
a_ = '##' + bert_word[j]
a_ = start + i
a_ = False
break
if single_word:
start += 1
return bert_word
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
"""simple docstring"""
a_ = []
for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
a_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['cws'] ).cws
a_ = [get_chinese_word(_UpperCAmelCase ) for r in res]
ltp_res.extend(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
a_ = []
for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
a_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=5_1_2 )
bert_res.extend(res['input_ids'] )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
a_ = []
for input_ids, chinese_word in zip(_UpperCAmelCase , _UpperCAmelCase ):
a_ = []
for id in input_ids:
a_ = bert_tokenizer._convert_id_to_token(_UpperCAmelCase )
input_tokens.append(_UpperCAmelCase )
a_ = add_sub_symbol(_UpperCAmelCase , _UpperCAmelCase )
a_ = []
        # We only save positions of Chinese subwords starting with ##, which means they are part of a whole word.
for i, token in enumerate(_UpperCAmelCase ):
if token[:2] == "##":
a_ = token[2:]
# save chinese tokens' pos
if len(_UpperCAmelCase ) == 1 and _is_chinese_char(ord(_UpperCAmelCase ) ):
ref_id.append(_UpperCAmelCase )
ref_ids.append(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
return ref_ids
def a ( _UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
a_ = f.readlines()
a_ = [line.strip() for line in data if len(_UpperCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    a_ = LTP(args.ltp ) # faster on a GPU device
a_ = BertTokenizer.from_pretrained(args.bert )
a_ = prepare_ref(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
a_ = [json.dumps(_UpperCAmelCase ) + '\n' for ref in ref_ids]
f.writelines(_UpperCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
__lowerCAmelCase =parser.parse_args()
main(args)
| 697 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase ="naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
a_ = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
a_ = self.processor.tokenajson(UpperCAmelCase__ )
self.assertDictEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 697 | 1 |
'''simple docstring'''
from __future__ import annotations
import bisect
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0 , _UpperCAmelCase = -1 ) -> int:
"""simple docstring"""
if hi < 0:
a_ = len(_UpperCAmelCase )
while lo < hi:
a_ = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
a_ = mid + 1
else:
a_ = mid
return lo
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0 , _UpperCAmelCase = -1 ) -> int:
"""simple docstring"""
if hi < 0:
a_ = len(_UpperCAmelCase )
while lo < hi:
a_ = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
a_ = mid + 1
else:
a_ = mid
return lo
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0 , _UpperCAmelCase = -1 ) -> None:
"""simple docstring"""
sorted_collection.insert(bisect_left(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0 , _UpperCAmelCase = -1 ) -> None:
"""simple docstring"""
sorted_collection.insert(bisect_right(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> int | None:
"""simple docstring"""
a_ = 0
a_ = len(_UpperCAmelCase ) - 1
while left <= right:
a_ = left + (right - left) // 2
a_ = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
a_ = midpoint - 1
else:
a_ = midpoint + 1
return None
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> int | None:
"""simple docstring"""
a_ = bisect.bisect_left(_UpperCAmelCase , _UpperCAmelCase )
if index != len(_UpperCAmelCase ) and sorted_collection[index] == item:
return index
return None
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int | None:
"""simple docstring"""
if right < left:
return None
a_ = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , midpoint - 1 )
else:
return binary_search_by_recursion(_UpperCAmelCase , _UpperCAmelCase , midpoint + 1 , _UpperCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase =input("Enter numbers separated by comma:\n").strip()
__lowerCAmelCase =sorted(int(item) for item in user_input.split(","))
__lowerCAmelCase =int(input("Enter a single number to be found in the list:\n"))
__lowerCAmelCase =binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''')
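# --- Illustrative check (standard library only) ---
# The difference between the two insertion points above, shown on a list with
# duplicates: bisect_left lands before the run of equal items, bisect_right
# after it, so either keeps the list sorted but at different positions.
import bisect as _bisect

_data = [1, 3, 3, 3, 5]
print(_bisect.bisect_left(_data, 3))   # 1 -> first index with value >= 3
print(_bisect.bisect_right(_data, 3))  # 4 -> first index with value > 3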
| 697 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="resnet50" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , ) -> Optional[Any]:
a_ = parent
a_ = out_indices if out_indices is not None else [4]
a_ = stage_names
a_ = out_features
a_ = backbone
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = use_pretrained_backbone
a_ = is_training
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = self.get_config()
return config, pixel_values
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = TimmBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
a_ = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ = config_and_inputs
a_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (TimmBackbone,) if is_torch_available() else ()
_UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = TimmBackboneModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = 'resnet18'
a_ = 'microsoft/resnet-18'
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
a_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
a_ = self.all_model_classes[0]
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = model(**UpperCAmelCase__ )
a_ = outputs[0][-1]
# Encoder-/Decoder-only models
a_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = None
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = False
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
| 697 | 1 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__lowerCAmelCase =importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__lowerCAmelCase =[
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def a ( _UpperCAmelCase ) -> str:
"""simple docstring"""
if "://" in dataset_path:
a_ = dataset_path.split('://' )[1]
return dataset_path
def a ( _UpperCAmelCase ) -> bool:
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
"""simple docstring"""
a_ = not is_remote_filesystem(_UpperCAmelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(_UpperCAmelCase ) , fs._strip_protocol(_UpperCAmelCase ) )
else:
fs.mv(_UpperCAmelCase , _UpperCAmelCase , recursive=_UpperCAmelCase )
def a ( ) -> None:
"""simple docstring"""
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
a_ = None
a_ = None
a_ = threading.Lock()
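# --- Illustrative sketch (the URI below is made up) ---
# What the path helper above is for: stripping the protocol prefix so that
# downstream code can treat remote and local dataset paths uniformly.
_dataset_path = 's3://my-bucket/datasets/squad'
print(_dataset_path.split('://')[1] if '://' in _dataset_path else _dataset_path)
# my-bucket/datasets/squad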
| 697 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]:
a_ = size if size is not None else {'height': 18, 'width': 18}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = apply_ocr
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = LayoutLMvaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
# with apply_OCR = True
a_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
a_ = Image.open(ds[0]['file'] ).convert('RGB' )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
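        # Note (added for context): with apply_ocr=True (the default) the processor runs
        # Tesseract OCR and returns the recognized words plus bounding boxes normalized
        # to the 0-1000 coordinate space that LayoutLM-family models expect.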
        # with apply_ocr = False
a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 697 | 1 |
'''simple docstring'''
def optimal_merge_pattern(files ) -> float:
    """
    Return the minimum total cost of merging ``files`` two at a time.

    >>> optimal_merge_pattern([2, 3, 4])
    14
    """
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost
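# Worked example (added; a sketch of the greedy idea, not part of the original file):
# optimal_merge_pattern([2, 3, 4]) first merges the two smallest sizes 2 and 3 (cost 5),
# then merges 5 and 4 (cost 9), for a minimum total cost of 5 + 9 = 14. This is the
# classic optimal merge pattern, i.e. Huffman-style pairwise merging.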
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 |
'''simple docstring'''
import math
def is_prime(number ) -> bool:
    """Return True if ``number`` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
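# Reasoning note (added): writing any integer as 6k + r with r in {0, 1, 2, 3, 4, 5},
# the cases r in {0, 2, 4} are even and r == 3 is a multiple of 3, so every prime
# greater than 3 must leave r == 1 or r == 5, i.e. have the form 6k +/- 1. That is
# why the trial-division loop above only tests i and i + 2 in steps of 6.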
def solution(nth = 1_0_0_0_1 ) -> int:
    """Return the ``nth`` prime number."""
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.' ) from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.' )
    primes = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
        num += 1
    return primes[len(primes ) - 1]
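# Example (added): solution(6) returns 13, the sixth prime (2, 3, 5, 7, 11, 13);
# the default call solution() returns the 10001st prime, as in Project Euler problem 7.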
if __name__ == "__main__":
print(f'''{solution() = }''')
| 697 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__=1 / 255 , UpperCAmelCase__=True , UpperCAmelCase__=[0.5, 0.5, 0.5] , UpperCAmelCase__=[0.5, 0.5, 0.5] , UpperCAmelCase__=True , ) -> Optional[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a_ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = do_rescale
a_ = rescale_factor
a_ = do_normalize
a_ = image_mean
a_ = image_std
a_ = do_pad
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=False ) -> Optional[Any]:
if not batched:
a_ = image_inputs[0]
if isinstance(UpperCAmelCase__ , Image.Image ):
a_ , a_ = image.size
else:
a_ , a_ = image.shape[1], image.shape[2]
if w < h:
a_ = int(self.size['shortest_edge'] * h / w )
a_ = self.size['shortest_edge']
elif w > h:
a_ = self.size['shortest_edge']
a_ = int(self.size['shortest_edge'] * w / h )
else:
a_ = self.size['shortest_edge']
a_ = self.size['shortest_edge']
else:
a_ = []
for image in image_inputs:
a_ , a_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a_ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[0] )[0]
a_ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[1] )[1]
return expected_height, expected_width
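    # Worked example (added): with shortest_edge=18 and a 40x30 (h x w) input, the
    # w < h branch above yields expected_width = 18 and
    # expected_height = int(18 * 40 / 30) = 24, preserving the aspect ratio.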
@require_torch
@require_vision
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = DetrImageProcessor if is_vision_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = DetrImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'image_mean' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'image_std' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_rescale' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'rescale_factor' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_pad' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase__ )
a_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ , a_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> str:
# prepare image and target
a_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
a_ = json.loads(f.read() )
a_ = {'image_id': 3_9769, 'annotations': target}
# encode them
a_ = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
a_ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='pt' )
# verify pixel values
a_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , UpperCAmelCase__ )
a_ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
# verify area
a_ = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCAmelCase__ ) )
# verify boxes
a_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCAmelCase__ )
a_ = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCAmelCase__ , atol=1e-3 ) )
# verify image_id
a_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCAmelCase__ ) )
# verify is_crowd
a_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCAmelCase__ ) )
# verify class_labels
a_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCAmelCase__ ) )
# verify orig_size
a_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCAmelCase__ ) )
# verify size
a_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCAmelCase__ ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# prepare image, target and masks_path
a_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
a_ = json.loads(f.read() )
a_ = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
a_ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
a_ = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
a_ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='pt' )
# verify pixel values
a_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , UpperCAmelCase__ )
a_ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
# verify area
a_ = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCAmelCase__ ) )
# verify boxes
a_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCAmelCase__ )
a_ = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCAmelCase__ , atol=1e-3 ) )
# verify image_id
a_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCAmelCase__ ) )
# verify is_crowd
a_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCAmelCase__ ) )
# verify class_labels
a_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCAmelCase__ ) )
# verify masks
a_ = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , UpperCAmelCase__ )
# verify orig_size
a_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCAmelCase__ ) )
# verify size
a_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCAmelCase__ ) )
| 697 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = 10
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = [1, 2, 3, 4]
a_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = ''
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
a_ , a_ = process_story(UpperCAmelCase__ )
a_ = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = ['It was the best of times.']
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = torch.tensor([1, 2, 3, 4] )
a_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = 101
a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ )
np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
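    # Note (added): compute_token_type_ids starts each row's segment id at 1 and flips
    # it at (and including) every separator token, here 101, which is how BERT-style
    # extractive summarizers mark alternating sentence blocks.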
| 697 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=14 , UpperCAmelCase__=7 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=False , UpperCAmelCase__=True , UpperCAmelCase__=99 , UpperCAmelCase__=32 , UpperCAmelCase__=4 , UpperCAmelCase__=4 , UpperCAmelCase__=4 , UpperCAmelCase__=37 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=0.0_2 , ) -> List[str]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = rotary_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = None
a_ = vocab_size - 1
a_ = vocab_size - 1
a_ = vocab_size - 1
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length] )
a_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]:
a_ = 20
a_ = model_class_name(UpperCAmelCase__ )
a_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase__ )
a_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
a_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
a_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , )
a_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
a_ = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase__ , )
a_ = model(UpperCAmelCase__ )
a_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
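    # Note (added): the check above validates the KV cache. Running all but the last
    # token with `init_cache`, then feeding only the final token against the cached
    # keys/values, must agree (within 1e-3) with one full forward pass over the sequence.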
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
a_ = 20
a_ = model_class_name(UpperCAmelCase__ )
a_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
a_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase__ )
a_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
a_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , )
a_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
a_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , )
a_ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
a_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
_UpperCamelCase = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = FlaxGPTJModelTester(self )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
for model_class_name in self.all_model_classes:
a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
for model_class_name in self.all_model_classes:
a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
@tooslow
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
a_ = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ )
a_ = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
a_ = False
a_ = model.config.eos_token_id
a_ = jax.jit(model.generate )
a_ = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
a_ = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
a_ = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@is_pt_flax_cross_test
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
a_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
a_ = getattr(UpperCAmelCase__ , UpperCAmelCase__ )
a_ , a_ = pt_inputs['input_ids'].shape
a_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase__ ):
a_ = 0
a_ = 1
a_ = 0
a_ = 1
a_ = pt_model_class(UpperCAmelCase__ ).eval()
a_ = model_class(UpperCAmelCase__ , dtype=jnp.floataa )
a_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase__ )
a_ = fx_state
with torch.no_grad():
a_ = pt_model(**UpperCAmelCase__ ).to_tuple()
a_ = fx_model(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase__ )
a_ = model_class.from_pretrained(UpperCAmelCase__ , from_pt=UpperCAmelCase__ )
a_ = fx_model_loaded(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
a_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
a_ = getattr(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = pt_model_class(UpperCAmelCase__ ).eval()
a_ = model_class(UpperCAmelCase__ , dtype=jnp.floataa )
a_ = load_flax_weights_in_pytorch_model(UpperCAmelCase__ , fx_model.params )
a_ , a_ = pt_inputs['input_ids'].shape
a_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase__ ):
a_ = 0
a_ = 1
a_ = 0
a_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
a_ = pt_model(**UpperCAmelCase__ ).to_tuple()
a_ = fx_model(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase__ )
a_ = pt_model_class.from_pretrained(UpperCAmelCase__ , from_flax=UpperCAmelCase__ )
with torch.no_grad():
a_ = pt_model_loaded(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
for model_class_name in self.all_model_classes:
a_ = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
a_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase__ )
| 697 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class _snake_case ( unittest.TestCase , snake_case ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = load_tool('text-question-answering' )
self.tool.setup()
a_ = load_tool('text-question-answering' , remote=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.remote_tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = self.tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = self.remote_tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
| 697 | 1 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={"vocab_file": "spiece.model"}
__lowerCAmelCase ={
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
}
}
__lowerCAmelCase ={
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = ["input_ids", "attention_mask"]
_UpperCamelCase = []
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__="<unk>" , UpperCAmelCase__="<s>" , UpperCAmelCase__="</s>" , UpperCAmelCase__="<pad>" , UpperCAmelCase__="[SEP]" , UpperCAmelCase__="[MASK]" , UpperCAmelCase__="[CLS]" , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> None:
a_ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else bos_token
a_ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else eos_token
a_ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else unk_token
a_ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else pad_token
a_ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else cls_token
a_ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
a_ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
a_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
a_ = vocab_file
a_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> str:
a_ = self.__dict__.copy()
a_ = None
return state
def __setstate__( self , UpperCAmelCase__ ) -> Any:
a_ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
a_ = {}
a_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> List[str]:
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Tuple:
return self.sp_model.piece_to_id(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> List[str]:
a_ = self.sp_model.IdToPiece(UpperCAmelCase__ )
return token
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> List[Any]:
a_ = []
a_ = ''
a_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase__ ) + token
a_ = True
a_ = []
else:
current_sub_tokens.append(UpperCAmelCase__ )
a_ = False
out_string += self.sp_model.decode(UpperCAmelCase__ )
return out_string.strip()
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = True , **UpperCAmelCase__ , ) -> str:
a_ = kwargs.pop('use_source_tokenizer' , UpperCAmelCase__ )
a_ = self.convert_ids_to_tokens(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
a_ = []
a_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) )
a_ = []
sub_texts.append(UpperCAmelCase__ )
else:
current_sub_text.append(UpperCAmelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
a_ = re.sub(R' (\[(MASK|SEP)\])' , R'\1' , ' '.join(UpperCAmelCase__ ) )
else:
a_ = ''.join(UpperCAmelCase__ )
a_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
a_ = self.clean_up_tokenization(UpperCAmelCase__ )
return clean_text
else:
return text
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a_ = os.path.join(
UpperCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , 'wb' ) as fi:
a_ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ = [self.cls_token_id]
a_ = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
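    # Example (added): a single sequence A becomes [CLS] A [SEP]; a pair (A, B) becomes
    # [CLS] A [SEP] B [SEP], the standard BERT/BigBird input layout.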
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ )) + [1]
return [1] + ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]:
a_ = [self.sep_token_id]
a_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 697 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =[
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
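# Note (added): _LazyModule defers the actual submodule imports until an attribute is
# first accessed, so importing the package stays cheap even when optional backends
# such as torch are unavailable.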
| 697 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "vit"
def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=224 , UpperCAmelCase__=16 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=16 , **UpperCAmelCase__ , ) -> Dict:
super().__init__(**UpperCAmelCase__ )
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = qkv_bias
a_ = encoder_stride
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> float:
return 1e-4
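# Illustrative usage (added; a sketch assuming the classes above are importable):
# config = ViTConfig() # defaults: 224x224 images, 16x16 patches, hidden_size=768
# num_patches = (config.image_size // config.patch_size) ** 2 # -> (224 // 16) ** 2 = 196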
| 697 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "vit"
def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=224 , UpperCAmelCase__=16 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=16 , **UpperCAmelCase__ , ) -> Dict:
super().__init__(**UpperCAmelCase__ )
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = qkv_bias
a_ = encoder_stride
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> float:
return 1e-4
| 697 | 1 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__lowerCAmelCase =[
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
"""simple docstring"""
for attribute in key.split('.' ):
a_ = getattr(_UpperCAmelCase , _UpperCAmelCase )
if weight_type is not None:
a_ = getattr(_UpperCAmelCase , _UpperCAmelCase ).shape
else:
a_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
a_ = value
elif weight_type == "weight_g":
a_ = value
elif weight_type == "weight_v":
a_ = value
elif weight_type == "bias":
a_ = value
else:
a_ = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
a_ = []
a_ = fairseq_model.state_dict()
a_ = hf_model.feature_extractor
a_ = hf_model.adapter
for name, value in fairseq_dict.items():
a_ = False
if "conv_layers" in name:
load_conv_layer(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
a_ = True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
a_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
a_ = True
if "*" in mapped_key:
a_ = name.split(_UpperCAmelCase )[0].split('.' )[-2]
a_ = mapped_key.replace('*' , _UpperCAmelCase )
if "weight_g" in name:
a_ = 'weight_g'
elif "weight_v" in name:
a_ = 'weight_v'
elif "bias" in name:
a_ = 'bias'
elif "weight" in name:
a_ = 'weight'
else:
a_ = None
set_recursively(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
continue
if not is_used:
unused_weights.append(_UpperCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
a_ = full_name.split('conv_layers.' )[-1]
a_ = name.split('.' )
a_ = int(items[0] )
a_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
a_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_UpperCAmelCase )
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
a_ = full_name.split('adaptor.' )[-1]
a_ = name.split('.' )
if items[1].isdigit():
a_ = int(items[1] )
else:
a_ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
a_ = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
a_ = value
            logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(_UpperCAmelCase )
def a ( _UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
a_ , a_ = emb.weight.shape
a_ = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
a_ = emb.weight.data
return lin_layer
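# Note (added): the helper above copies the decoder's token-embedding matrix into a
# bias-free nn.Linear, so the resulting LM head shares (is tied to) the embedding
# weights rather than allocating new parameters.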
@torch.no_grad()
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> str:
"""simple docstring"""
a_ = WavaVecaConfig.from_pretrained(
_UpperCAmelCase , add_adapter=_UpperCAmelCase , adapter_stride=_UpperCAmelCase , adapter_kernel_size=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , output_hidden_size=_UpperCAmelCase , )
a_ = MBartConfig.from_pretrained(_UpperCAmelCase )
# load model
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
a_ = model[0].eval()
# load feature extractor
a_ = WavaVecaFeatureExtractor.from_pretrained(_UpperCAmelCase , use_auth_token=_UpperCAmelCase )
# set weights for wav2vec2 encoder
a_ = WavaVecaModel(_UpperCAmelCase )
recursively_load_weights_wavaveca(model.encoder , _UpperCAmelCase )
# load decoder weights
a_ = MBartForCausalLM(_UpperCAmelCase )
a_ , a_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_UpperCAmelCase )
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
a_ = SpeechEncoderDecoderModel(encoder=_UpperCAmelCase , decoder=_UpperCAmelCase )
a_ = False
a_ = MBartaaTokenizer(_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
a_ = hf_wavavec.config.to_dict()
a_ = tokenizer.pad_token_id
a_ = tokenizer.bos_token_id
a_ = tokenizer.eos_token_id
a_ = 'mbart50'
a_ = 'wav2vec2'
a_ = tokenizer.eos_token_id
a_ = 2_5_0_0_0_4
a_ = tokenizer.eos_token_id
a_ = SpeechEncoderDecoderConfig.from_dict(_UpperCAmelCase )
hf_wavavec.save_pretrained(_UpperCAmelCase )
feature_extractor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=25_0004, type=int, help="`decoder_start_token_id` of model config")
__lowerCAmelCase =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 697 |
'''simple docstring'''
def solution(length = 5_0 ) -> int:
    """Count the ways to fill a row of ``length`` units with red blocks of length >= 3
    separated by at least one empty unit."""
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
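# Sanity check (added): solution(3) == 2, since a row of three units is either left
# empty or filled by a single red block of length 3.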
if __name__ == "__main__":
print(f'''{solution() = }''')
| 697 | 1 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__lowerCAmelCase =True
except (ImportError, ModuleNotFoundError):
__lowerCAmelCase =False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(text ) -> str:
    """Split ``text`` into sentences with nltk and join them with newlines."""
    text = re.sub('<n>' , '' , text ) # remove pegasus newline char (and keep the substituted result)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(text ) )
| 697 |
'''simple docstring'''
def nor_gate(input_a , input_b ) -> int:
    """Return 1 when both inputs are 0 (logical NOR), else 0."""
    return int(input_a == input_b == 0 )
def main() -> None:
    """Print the truth table of the NOR gate."""
    print('Truth Table of NOR Gate:' )
    print('| Input 1 | Input 2 | Output |' )
    print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
    print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
    print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
    print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 697 | 1 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__lowerCAmelCase ="\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
__lowerCAmelCase ="\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
__lowerCAmelCase ="\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
__lowerCAmelCase ="\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
__lowerCAmelCase ="The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> str:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=[1, 10, 100] , UpperCAmelCase__=4 , UpperCAmelCase__=3.0 ) -> Optional[int]:
if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.' )
with ThreadPoolExecutor(max_workers=UpperCAmelCase__ ) as executor:
a_ = []
a_ = Counter()
a_ = 0
a_ = defaultdict(UpperCAmelCase__ )
for task_id, (candidates, test_case) in enumerate(zip(UpperCAmelCase__ , UpperCAmelCase__ ) ):
for candidate in candidates:
a_ = candidate + '\n' + test_case
a_ = (test_program, timeout, task_id, completion_id[task_id])
a_ = executor.submit(UpperCAmelCase__ , *UpperCAmelCase__ )
futures.append(UpperCAmelCase__ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(UpperCAmelCase__ ):
a_ = future.result()
results[result["task_id"]].append((result['completion_id'], result) )
a_ , a_ = [], []
for result in results.values():
result.sort()
a_ = [r[1]['passed'] for r in result]
total.append(len(UpperCAmelCase__ ) )
correct.append(sum(UpperCAmelCase__ ) )
a_ = np.array(UpperCAmelCase__ )
a_ = np.array(UpperCAmelCase__ )
a_ = k
a_ = {F'''pass@{k}''': estimate_pass_at_k(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
"""simple docstring"""
def estimator(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
a_ = itertools.repeat(_UpperCAmelCase , len(_UpperCAmelCase ) )
else:
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
a_ = iter(_UpperCAmelCase )
return np.array([estimator(int(_UpperCAmelCase ) , int(_UpperCAmelCase ) , _UpperCAmelCase ) for n, c in zip(_UpperCAmelCase , _UpperCAmelCase )] )
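# A self-contained sketch of the unbiased pass@k estimator defined above (this
# dump mangles its locals, but `compute` references it as estimate_pass_at_k):
# pass@k = 1 - C(n - c, k) / C(n, k), evaluated stably as a running product.
import numpy as np

def pass_at_k_demo(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0  # every size-k draw from the n samples must hit a correct one
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

# With k=1 the estimator reduces to the plain pass rate c / n.
assert abs(pass_at_k_demo(5, 2, 1) - 0.4) < 1e-9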
| 697 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class _snake_case ( snake_case ):
"""simple docstring"""
@staticmethod
def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> str:
a_ = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='Path to location to store the models' )
download_parser.add_argument(
            '--force' , action='store_true' , help='Force the model to be downloaded even if it is already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=UpperCAmelCase__ , help='Name of the model to download' )
download_parser.set_defaults(func=UpperCAmelCase__ )
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = model
a_ = cache
a_ = force
a_ = trust_remote_code
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
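# A minimal sketch (hypothetical names) of how such a subcommand plugs into an
# ArgumentParser: the registration step adds a parser, set_defaults wires the
# factory, and the CLI entry point dispatches through args.func.
from argparse import ArgumentParser

def demo_factory(args):
    return f"would download {args.model}"

demo_parser = ArgumentParser("demo-cli")
demo_subparsers = demo_parser.add_subparsers()
demo_download = demo_subparsers.add_parser("download")
demo_download.add_argument("model", type=str)
demo_download.set_defaults(func=demo_factory)
demo_args = demo_parser.parse_args(["download", "bert-base-uncased"])
assert demo_args.func(demo_args) == "would download bert-base-uncased"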
| 697 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase ={"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
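# A minimal sketch of the lazy-import pattern used above (simplified; the real
# _LazyModule does more bookkeeping): attribute access triggers the actual
# submodule import, so importing the package itself stays cheap.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)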
| 697 |
'''simple docstring'''
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
assert column_title.isupper()
a_ = 0
a_ = len(_UpperCAmelCase ) - 1
a_ = 0
while index >= 0:
a_ = (ord(column_title[index] ) - 6_4) * pow(2_6 , _UpperCAmelCase )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
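# A self-contained restatement of the base-26 conversion above (this dump
# mangles the locals): "A" -> 1, "AB" -> 1 * 26 + 2 = 28, "ZZ" -> 702.
def excel_column_to_number(column_title: str) -> int:
    result = 0
    for char in column_title:  # most significant letter first
        result = result * 26 + (ord(char) - ord("A") + 1)
    return result

assert excel_column_to_number("AB") == 28
assert excel_column_to_number("ZZ") == 702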
| 697 | 1 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
__lowerCAmelCase =yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
__lowerCAmelCase ={
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
__lowerCAmelCase ="\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCAmelCase ="\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCAmelCase ={
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
__lowerCAmelCase ="\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCAmelCase =(
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
__lowerCAmelCase ="\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCAmelCase =(
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
__lowerCAmelCase ="\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCAmelCase ="The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
__lowerCAmelCase ="\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCAmelCase ="The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
__lowerCAmelCase ="\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
__lowerCAmelCase ="The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
__lowerCAmelCase ="\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
__lowerCAmelCase ="The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
__lowerCAmelCase ="\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
__lowerCAmelCase ="The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
__lowerCAmelCase ="\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCAmelCase ="The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
__lowerCAmelCase ="\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
__lowerCAmelCase ="The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
__lowerCAmelCase ="\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCAmelCase ="The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
__lowerCAmelCase =""
__lowerCAmelCase ="The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
__lowerCAmelCase ="\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCAmelCase ="The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
assert ReadMe.from_string(_UpperCAmelCase , _UpperCAmelCase ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
"""simple docstring"""
with pytest.raises(_UpperCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ):
a_ = ReadMe.from_string(_UpperCAmelCase , _UpperCAmelCase )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
"""simple docstring"""
with pytest.raises(_UpperCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ):
ReadMe.from_string(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def a ( _UpperCAmelCase ) -> Tuple:
"""simple docstring"""
ReadMe.from_string(_UpperCAmelCase , _UpperCAmelCase , suppress_parsing_errors=_UpperCAmelCase )
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
a_ = Path(_UpperCAmelCase ) / 'README.md'
with open(_UpperCAmelCase , 'w+' ) as readme_file:
readme_file.write(_UpperCAmelCase )
a_ = ReadMe.from_readme(_UpperCAmelCase , _UpperCAmelCase ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
a_ = Path(_UpperCAmelCase ) / 'README.md'
with open(_UpperCAmelCase , 'w+' ) as readme_file:
readme_file.write(_UpperCAmelCase )
a_ = expected_error.format(path=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase , match=re.escape(_UpperCAmelCase ) ):
a_ = ReadMe.from_readme(_UpperCAmelCase , _UpperCAmelCase )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
a_ = Path(_UpperCAmelCase ) / 'README.md'
with open(_UpperCAmelCase , 'w+' ) as readme_file:
readme_file.write(_UpperCAmelCase )
a_ = expected_error.format(path=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase , match=re.escape(_UpperCAmelCase ) ):
ReadMe.from_readme(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def a ( _UpperCAmelCase ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
a_ = Path(_UpperCAmelCase ) / 'README.md'
with open(_UpperCAmelCase , 'w+' ) as readme_file:
readme_file.write(_UpperCAmelCase )
ReadMe.from_readme(_UpperCAmelCase , _UpperCAmelCase , suppress_parsing_errors=_UpperCAmelCase )
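# A minimal usage sketch of the API under test, using an inlined README and a
# deliberately tiny structure (the module-level fixtures above carry mangled
# names; the real structure is the YAML loaded at the top of this file):
demo_structure = yaml.safe_load('name: ""\nallow_empty: false\nallow_empty_text: true\nsubsections: null\n')
demo_readme = "---\nlanguage:\n- en\n---\n\n# Dataset Card for Demo\nSome text here.\n"
demo_readme_obj = ReadMe.from_string(demo_readme, demo_structure)
assert demo_readme_obj.to_dict()["name"] == "root"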
| 697 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCAmelCase =logging.get_logger(__name__)
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = ["audio_values", "audio_mask"]
def __init__( self , UpperCAmelCase__=2048 , UpperCAmelCase__=1 , UpperCAmelCase__=[16, 16] , UpperCAmelCase__=128 , UpperCAmelCase__=4_4100 , UpperCAmelCase__=86 , UpperCAmelCase__=2048 , UpperCAmelCase__=0.0 , **UpperCAmelCase__ , ) -> Union[str, Any]:
super().__init__(
feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = spectrogram_length
a_ = num_channels
a_ = patch_size
a_ = feature_size // self.patch_size[1]
a_ = n_fft
a_ = sampling_rate // hop_length_to_sampling_rate
a_ = sampling_rate
a_ = padding_value
a_ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCAmelCase__ , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=UpperCAmelCase__ , norm='slaney' , mel_scale='slaney' , ).T
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> np.ndarray:
a_ = spectrogram(
UpperCAmelCase__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
a_ = log_spec[:, :-1]
a_ = log_spec - 2_0.0
a_ = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , **UpperCAmelCase__ , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
a_ = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
a_ = is_batched_numpy or (
isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a_ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ):
a_ = np.asarray(UpperCAmelCase__ , dtype=np.floataa )
elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a_ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
a_ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , UpperCAmelCase__ ):
a_ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
a_ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
a_ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
a_ = np.array(UpperCAmelCase__ ).astype(np.floataa )
# convert into correct format for padding
a_ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
a_ = np.ones([len(UpperCAmelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
a_ = padded_audio_features * self.padding_value
for i in range(len(UpperCAmelCase__ ) ):
a_ = audio_features[i]
a_ = feature
# return as BatchFeature
if return_attention_mask:
a_ = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
a_ = {'audio_values': padded_audio_features}
a_ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
return encoded_inputs
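# A self-contained check of the log-mel normalisation performed above: shift
# by 20 dB, scale by 40 dB, then clip so every value lands in [-1.0, 1.0].
import numpy as np

demo_spec = np.random.uniform(-80.0, 0.0, size=(128, 100))  # fake dB values
demo_spec = demo_spec - 20.0
demo_spec = np.clip(demo_spec / 40.0, -2.0, 0.0) + 1.0
assert demo_spec.min() >= -1.0 and demo_spec.max() <= 1.0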
| 697 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__=0 ) -> Tuple:
a_ = np.random.RandomState(UpperCAmelCase__ )
a_ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = self.get_dummy_inputs()
a_ = pipe(**UpperCAmelCase__ ).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
a_ = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = self.get_dummy_inputs()
a_ = pipe(**UpperCAmelCase__ ).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
a_ = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = self.get_dummy_inputs()
a_ = pipe(**UpperCAmelCase__ ).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
a_ = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = self.get_dummy_inputs()
a_ = pipe(**UpperCAmelCase__ ).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
a_ = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = self.get_dummy_inputs()
a_ = pipe(**UpperCAmelCase__ ).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
a_ = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = self.get_dummy_inputs()
a_ = pipe(**UpperCAmelCase__ ).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
a_ = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = self.get_dummy_inputs()
a_ = 3 * [inputs['prompt']]
# forward
a_ = pipe(**UpperCAmelCase__ )
a_ = output.images[0, -3:, -3:, -1]
a_ = self.get_dummy_inputs()
a_ = 3 * [inputs.pop('prompt' )]
a_ = pipe.tokenizer(
UpperCAmelCase__ , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=UpperCAmelCase__ , return_tensors='np' , )
a_ = text_inputs['input_ids']
a_ = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
a_ = prompt_embeds
# forward
a_ = pipe(**UpperCAmelCase__ )
a_ = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = self.get_dummy_inputs()
a_ = 3 * ['this is a negative prompt']
a_ = negative_prompt
a_ = 3 * [inputs['prompt']]
# forward
a_ = pipe(**UpperCAmelCase__ )
a_ = output.images[0, -3:, -3:, -1]
a_ = self.get_dummy_inputs()
a_ = 3 * [inputs.pop('prompt' )]
a_ = []
for p in [prompt, negative_prompt]:
a_ = pipe.tokenizer(
UpperCAmelCase__ , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=UpperCAmelCase__ , return_tensors='np' , )
a_ = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
a_ , a_ = embeds
# forward
a_ = pipe(**UpperCAmelCase__ )
a_ = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@property
def __SCREAMING_SNAKE_CASE ( self ) -> str:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = ort.SessionOptions()
a_ = False
return options
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
# using the PNDM scheduler by default
a_ = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = 'A painting of a squirrel eating a burger'
np.random.seed(0 )
a_ = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='np' )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a_ = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
a_ = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = 'open neural network exchange'
a_ = np.random.RandomState(0 )
a_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase__ , output_type='np' )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a_ = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
a_ = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = 'open neural network exchange'
a_ = np.random.RandomState(0 )
a_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase__ , output_type='np' )
a_ = output.images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a_ = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = 0
def test_callback_fn(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> None:
a_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
a_ = latents[0, -3:, -3:, -1]
a_ = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
a_ = latents[0, -3:, -3:, -1]
a_ = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
a_ = False
a_ = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
a_ = 'Andromeda galaxy in a bottle'
a_ = np.random.RandomState(0 )
pipe(
prompt=UpperCAmelCase__ , num_inference_steps=5 , guidance_scale=7.5 , generator=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert pipe.safety_checker is None
a_ = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase__ )
a_ = OnnxStableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
a_ = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
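# A minimal usage sketch of the pipeline exercised above (same checkpoint,
# revision, and provider as the tests; the ONNX weights are a large download):
demo_pipe = OnnxStableDiffusionPipeline.from_pretrained(
    'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=None,
    provider='CPUExecutionProvider',
)
demo_image = demo_pipe('A painting of a squirrel eating a burger', num_inference_steps=10).images[0]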
| 697 |
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Optional[Any]:
"""simple docstring"""
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
a_ = len(set_a.intersection(_UpperCAmelCase ) )
if alternative_union:
a_ = len(_UpperCAmelCase ) + len(_UpperCAmelCase )
else:
a_ = len(set_a.union(_UpperCAmelCase ) )
return intersection / union
if isinstance(_UpperCAmelCase , (list, tuple) ) and isinstance(_UpperCAmelCase , (list, tuple) ):
a_ = [element for element in set_a if element in set_b]
if alternative_union:
a_ = len(_UpperCAmelCase ) + len(_UpperCAmelCase )
return len(_UpperCAmelCase ) / union
else:
a_ = set_a + [element for element in set_b if element not in set_a]
return len(_UpperCAmelCase ) / len(_UpperCAmelCase )
return None
if __name__ == "__main__":
__lowerCAmelCase ={"a", "b", "c", "d", "e"}
__lowerCAmelCase ={"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
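# Quick self-contained check of the example above (the module-level names are
# mangled in this dump): the sets share {"c", "d", "e"} and their union holds
# 8 distinct elements, so the script is meant to print 3 / 8 = 0.375.
demo_a, demo_b = {"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}
assert len(demo_a & demo_b) / len(demo_a | demo_b) == 0.375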
| 697 | 1 |
'''simple docstring'''
import sys
__lowerCAmelCase =(
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def a ( _UpperCAmelCase = N ) -> int:
"""simple docstring"""
a_ = -sys.maxsize - 1
for i in range(len(_UpperCAmelCase ) - 1_2 ):
a_ = 1
for j in range(1_3 ):
product *= int(n[i + j] )
if product > largest_product:
a_ = product
return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
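# A self-contained restatement of the scan above (locals are mangled in this
# dump), parameterised by window width; zero digits simply zero the product,
# which is why rescanning each window is the easy-to-get-right approach.
def max_window_product(digits: str, width: int) -> int:
    best = 0
    for i in range(len(digits) - width + 1):
        product = 1
        for ch in digits[i : i + width]:
            product *= int(ch)
        best = max(best, product)
    return best

assert max_window_product("123456", 4) == 3 * 4 * 5 * 6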
| 697 |
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
raise ValueError(
            'starting number must be an integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
a_ = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
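# Self-contained check of the rule above (the function's parameters are
# mangled in this dump, so it is not directly callable): 15 hits both branches.
demo_out = ""
for demo_n in (3, 5, 15):
    demo_word = ("Fizz" if demo_n % 3 == 0 else "") + ("Buzz" if demo_n % 5 == 0 else "")
    demo_out += (demo_word or str(demo_n)) + " "
assert demo_out == "Fizz Buzz FizzBuzz "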
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 | 1 |
'''simple docstring'''
def a ( _UpperCAmelCase ) -> list:
"""simple docstring"""
a_ = len(_UpperCAmelCase )
for _ in range(_UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
a_ , a_ = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__lowerCAmelCase =list(range(10, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
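# Self-contained run of the odd/even (brick-sort) exchange above, which
# alternates between even- and odd-indexed neighbour pairs on each pass:
demo = [3, 1, 2]
for demo_pass in range(len(demo)):
    for i in range(demo_pass % 2, len(demo) - 1, 2):
        if demo[i + 1] < demo[i]:
            demo[i], demo[i + 1] = demo[i + 1], demo[i]
assert demo == [1, 2, 3]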
| 697 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _snake_case ( snake_case ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> str:
super().__init__(
features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ , streaming=UpperCAmelCase__ , num_proc=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = Generator(
cache_dir=UpperCAmelCase__ , features=UpperCAmelCase__ , generator=UpperCAmelCase__ , gen_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ , )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
# Build iterable dataset
if self.streaming:
a_ = self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
a_ = None
a_ = None
a_ = None
a_ = None
self.builder.download_and_prepare(
download_config=UpperCAmelCase__ , download_mode=UpperCAmelCase__ , verification_mode=UpperCAmelCase__ , base_path=UpperCAmelCase__ , num_proc=self.num_proc , )
a_ = self.builder.as_dataset(
split='train' , verification_mode=UpperCAmelCase__ , in_memory=self.keep_in_memory )
return dataset
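# A minimal usage sketch of the public entry point that builds this reader
# internally (Dataset.from_generator):
from datasets import Dataset

def demo_gen():
    for i in range(3):
        yield {"idx": i, "text": f"example {i}"}

demo_ds = Dataset.from_generator(demo_gen)
assert demo_ds.num_rows == 3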
| 697 | 1 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def a ( _UpperCAmelCase ) -> List[str]:
"""simple docstring"""
for i in range(0 , _UpperCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def a ( _UpperCAmelCase ) -> Dict:
"""simple docstring"""
for i in range(_UpperCAmelCase , 0 , -1 ):
for _ in range(_UpperCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def a ( _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(_UpperCAmelCase ) # upper half
reverse_floyd(_UpperCAmelCase ) # lower half
if __name__ == "__main__":
print(r"| /\ | |- | |- |--| |\ /| |-")
print(r"|/ \| |- |_ |_ |__| | \/ | |_")
__lowerCAmelCase =1
while K:
__lowerCAmelCase =int(input("enter the number and , and see the magic : "))
print()
pretty_print(user_number)
__lowerCAmelCase =int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 697 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowerCAmelCase =numpy.array([0, 0])
__lowerCAmelCase =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase =numpy.array([1, 0])
__lowerCAmelCase =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> list[numpy.ndarray]:
"""simple docstring"""
a_ = initial_vectors
for _ in range(_UpperCAmelCase ):
a_ = iteration_step(_UpperCAmelCase )
return vectors
def a ( _UpperCAmelCase ) -> list[numpy.ndarray]:
"""simple docstring"""
a_ = []
for i, start_vector in enumerate(vectors[:-1] ):
a_ = vectors[i + 1]
new_vectors.append(_UpperCAmelCase )
a_ = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> numpy.ndarray:
"""simple docstring"""
a_ = numpy.radians(_UpperCAmelCase )
a_ , a_ = numpy.cos(_UpperCAmelCase ), numpy.sin(_UpperCAmelCase )
a_ = numpy.array(((c, -s), (s, c)) )
return numpy.dot(_UpperCAmelCase , _UpperCAmelCase )
def a ( _UpperCAmelCase ) -> None:
"""simple docstring"""
a_ = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
a_ , a_ = zip(*_UpperCAmelCase )
plt.plot(_UpperCAmelCase , _UpperCAmelCase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
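# Self-contained check of one Koch step on the unit segment (the helpers above
# all share the mangled name `a` in this dump): the inserted peak is the 1/3
# point plus rotate(segment/3, 60 degrees), landing at height sqrt(3)/6.
import numpy

demo_start, demo_end = numpy.array([0.0, 0.0]), numpy.array([1.0, 0.0])
demo_d = (demo_end - demo_start) / 3
demo_theta = numpy.radians(60)
demo_rot = numpy.array(
    ((numpy.cos(demo_theta), -numpy.sin(demo_theta)), (numpy.sin(demo_theta), numpy.cos(demo_theta)))
)
demo_peak = demo_start + demo_d + demo_rot.dot(demo_d)
assert numpy.isclose(demo_peak[1], numpy.sqrt(3) / 6)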
| 697 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "efficientnet"
def __init__( self , UpperCAmelCase__ = 3 , UpperCAmelCase__ = 600 , UpperCAmelCase__ = 2.0 , UpperCAmelCase__ = 3.1 , UpperCAmelCase__ = 8 , UpperCAmelCase__ = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase__ = [32, 16, 24, 40, 80, 112, 192] , UpperCAmelCase__ = [16, 24, 40, 80, 112, 192, 320] , UpperCAmelCase__ = [] , UpperCAmelCase__ = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase__ = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase__ = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase__ = 0.2_5 , UpperCAmelCase__ = "swish" , UpperCAmelCase__ = 2560 , UpperCAmelCase__ = "mean" , UpperCAmelCase__ = 0.0_2 , UpperCAmelCase__ = 0.0_0_1 , UpperCAmelCase__ = 0.9_9 , UpperCAmelCase__ = 0.5 , UpperCAmelCase__ = 0.2 , **UpperCAmelCase__ , ) -> Union[str, Any]:
super().__init__(**UpperCAmelCase__ )
a_ = num_channels
a_ = image_size
a_ = width_coefficient
a_ = depth_coefficient
a_ = depth_divisor
a_ = kernel_sizes
a_ = in_channels
a_ = out_channels
a_ = depthwise_padding
a_ = strides
a_ = num_block_repeats
a_ = expand_ratios
a_ = squeeze_expansion_ratio
a_ = hidden_act
a_ = hidden_dim
a_ = pooling_type
a_ = initializer_range
a_ = batch_norm_eps
a_ = batch_norm_momentum
a_ = dropout_rate
a_ = drop_connect_rate
a_ = sum(UpperCAmelCase__ ) * 4
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> float:
return 1e-5
| 697 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
def a ( _UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
for char in word:
a_ = ord(_UpperCAmelCase )
if not _is_chinese_char(_UpperCAmelCase ):
return 0
return 1
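# Quick self-contained check of the code-point test above: the basic CJK
# Unified Ideographs block is U+4E00..U+9FFF.
assert 0X4_e00 <= ord("中") <= 0X9_fff
assert not (0X4_e00 <= ord("a") <= 0X9_fff)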
def a ( _UpperCAmelCase ) -> Tuple:
"""simple docstring"""
a_ = set()
for token in tokens:
a_ = len(_UpperCAmelCase ) > 1 and is_chinese(_UpperCAmelCase )
if chinese_word:
word_set.add(_UpperCAmelCase )
a_ = list(_UpperCAmelCase )
return word_list
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
a_ = max([len(_UpperCAmelCase ) for w in chinese_word_set] )
a_ = bert_tokens
a_ , a_ = 0, len(_UpperCAmelCase )
while start < end:
a_ = True
if is_chinese(bert_word[start] ):
a_ = min(end - start , _UpperCAmelCase )
for i in range(_UpperCAmelCase , 1 , -1 ):
a_ = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
a_ = '##' + bert_word[j]
a_ = start + i
a_ = False
break
if single_word:
start += 1
return bert_word
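# A self-contained sketch of the rewrite above (the helpers in this dump carry
# mangled names): tokens that continue a known whole word gain the "##" prefix
# that whole-word masking keys on.
def mark_subwords(tokens, whole_words):
    out, i = list(tokens), 0
    max_len = max((len(w) for w in whole_words), default=1)
    while i < len(out):
        for span in range(min(len(out) - i, max_len), 1, -1):  # longest match first
            if "".join(out[i : i + span]) in whole_words:
                for j in range(i + 1, i + span):
                    out[j] = "##" + out[j]
                i += span
                break
        else:
            i += 1
    return out

assert mark_subwords(["中", "国", "人"], {"中国"}) == ["中", "##国", "人"]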
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
"""simple docstring"""
a_ = []
for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
a_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['cws'] ).cws
a_ = [get_chinese_word(_UpperCAmelCase ) for r in res]
ltp_res.extend(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
a_ = []
for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
a_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=5_1_2 )
bert_res.extend(res['input_ids'] )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
a_ = []
for input_ids, chinese_word in zip(_UpperCAmelCase , _UpperCAmelCase ):
a_ = []
for id in input_ids:
a_ = bert_tokenizer._convert_id_to_token(_UpperCAmelCase )
input_tokens.append(_UpperCAmelCase )
a_ = add_sub_symbol(_UpperCAmelCase , _UpperCAmelCase )
a_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_UpperCAmelCase ):
if token[:2] == "##":
a_ = token[2:]
# save chinese tokens' pos
if len(_UpperCAmelCase ) == 1 and _is_chinese_char(ord(_UpperCAmelCase ) ):
ref_id.append(_UpperCAmelCase )
ref_ids.append(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
return ref_ids
def a ( _UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
a_ = f.readlines()
a_ = [line.strip() for line in data if len(_UpperCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
a_ = LTP(args.ltp ) # faster in GPU device
a_ = BertTokenizer.from_pretrained(args.bert )
a_ = prepare_ref(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
a_ = [json.dumps(_UpperCAmelCase ) + '\n' for ref in ref_ids]
f.writelines(_UpperCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
__lowerCAmelCase =parser.parse_args()
main(args)
| 697 | 1 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase ="naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
a_ = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
a_ = self.processor.tokenajson(UpperCAmelCase__ )
self.assertDictEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 697 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase ={
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__lowerCAmelCase ={
"yjernite/retribert-base-uncased": 512,
}
__lowerCAmelCase ={
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase = RetriBertTokenizer
_UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__="[UNK]" , UpperCAmelCase__="[SEP]" , UpperCAmelCase__="[PAD]" , UpperCAmelCase__="[CLS]" , UpperCAmelCase__="[MASK]" , UpperCAmelCase__=True , UpperCAmelCase__=None , **UpperCAmelCase__ , ) -> int:
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCAmelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase__ ) != tokenize_chinese_chars
):
a_ = getattr(UpperCAmelCase__ , normalizer_state.pop('type' ) )
a_ = do_lower_case
a_ = strip_accents
a_ = tokenize_chinese_chars
a_ = normalizer_class(**UpperCAmelCase__ )
a_ = do_lower_case
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=None ) -> str:
a_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]:
a_ = [self.sep_token_id]
a_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Tuple[str]:
a_ = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
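# Sketch of the special-token layout the methods above produce for a sequence
# pair (token ids below are assumed, following standard BERT conventions):
demo_cls, demo_sep = 101, 102
demo_ids_a, demo_ids_b = [7592], [2088]
demo_pair = [demo_cls] + demo_ids_a + [demo_sep] + demo_ids_b + [demo_sep]
demo_segments = [0] * (len(demo_ids_a) + 2) + [1] * (len(demo_ids_b) + 1)
assert demo_pair == [101, 7592, 102, 2088, 102]
assert demo_segments == [0, 0, 0, 1, 1]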
| 697 | 1 |
'''simple docstring'''
def a ( _UpperCAmelCase ) -> bool:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('check_bouncy() accepts only integer arguments' )
a_ = str(_UpperCAmelCase )
a_ = ''.join(sorted(_UpperCAmelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def a ( _UpperCAmelCase = 9_9 ) -> int:
"""simple docstring"""
if not 0 < percent < 1_0_0:
raise ValueError('solution() only accepts values from 0 to 100' )
a_ = 0
a_ = 1
while True:
if check_bouncy(_UpperCAmelCase ):
bouncy_num += 1
if (bouncy_num / num) * 1_0_0 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
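# Self-contained check of the bounciness test above: a bouncy number's digits
# are neither fully non-decreasing nor fully non-increasing.
demo = str(155349)
assert "".join(sorted(demo)) != demo           # not increasing
assert "".join(sorted(demo))[::-1] != demo     # not decreasing
assert "".join(sorted(str(134468))) == str(134468)  # increasing, so not bouncy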
| 697 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "lilt"
def __init__( self , UpperCAmelCase__=3_0522 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=0 , UpperCAmelCase__="absolute" , UpperCAmelCase__=None , UpperCAmelCase__=4 , UpperCAmelCase__=1024 , **UpperCAmelCase__ , ) -> Optional[Any]:
super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = classifier_dropout
a_ = channel_shrink_ratio
a_ = max_ad_position_embeddings
| 697 | 1 |
'''simple docstring'''
def jaro_winkler ( stra , strb ) -> float:
    """simple docstring"""
    def get_matched_characters(_stra , _strb ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                # blank out the matched character so it cannot be matched twice
                _strb = F'''{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}'''
        return "".join(matched )
    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
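# Worked example (hedged; the classic Jaro-Winkler illustration):
# jaro_winkler("MARTHA", "MARHTA") yields 6 matches, 1 transposition and a
# common prefix of 3, i.e. roughly 0.9611, while the "hello"/"world" call
# below evaluates to about 0.47.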
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 697 |
'''simple docstring'''
from __future__ import annotations
def a ( maze ) -> bool:
    """simple docstring"""
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print('\n'.join(str(row ) for row in solutions ) )
    else:
        print('No solution exists!' )
    return solved
def run_maze ( maze , i , j , solutions ) -> bool:
    """simple docstring"""
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
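# Hedged sanity sketch (0 = open cell, 1 = wall, path runs from the top-left to
# the bottom-right corner): a([[0, 1], [0, 0]]) prints the solution matrix rows
# [1, 0] and [1, 1] and returns True.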
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 | 1 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ="T5Config"
class TFMTaModel ( TFTaModel ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
class TFMTaForConditionalGeneration ( TFTaForConditionalGeneration ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
class TFMTaEncoderModel ( TFTaEncoderModel ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
| 697 |
'''simple docstring'''
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def a ( value , from_type , to_type ) -> float:
    """simple docstring"""
    from_sanitized = from_type.lower().strip('s' )
    to_sanitized = to_type.lower().strip('s' )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid \'from_type\' value: {from_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION )}'''
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid \'to_type\' value: {to_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION )}'''
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(1_0 , exponent )
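# Hedged check: "meter" maps to exponent 0 and "kilometer" to 3, so
# a(1, "meter", "kilometer") == 0.001 and a(1, "kilometer", "meter") == 1000.0.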
if __name__ == "__main__":
from doctest import testmod
testmod()
| 697 | 1 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__lowerCAmelCase =logging.get_logger(__name__) # pylint: disable=invalid-name
class _snake_case ( snake_case ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) -> Optional[Any]:
super().__init__()
self.register_modules(
vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ = "auto" ) -> Union[str, Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        self.enable_attention_slicing(None )
@torch.no_grad()
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ = 512 , UpperCAmelCase__ = 512 , UpperCAmelCase__ = 50 , UpperCAmelCase__ = 7.5 , UpperCAmelCase__ = None , UpperCAmelCase__ = 1 , UpperCAmelCase__ = 0.0 , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = "pil" , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = 1 , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> Tuple:
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(prompt )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(callback_steps )}.''' )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = ['']
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    F'''`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !='''
                    F''' {type(prompt )}.''' )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    F'''`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:'''
                    F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    ' the batch size of `prompt`.' )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding='max_length' , max_length=max_length , truncation=True , return_tensors='pt' , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
        text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference , generator=UpperCAmelCase__ , device='cpu' , dtype=latents_dtype ).to(self.device )
                latents = torch.randn(latents_shape , generator=UpperCAmelCase__ , device='cpu' , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference , generator=UpperCAmelCase__ , device=self.device , dtype=latents_dtype )
                latents = torch.randn(latents_shape , generator=UpperCAmelCase__ , device=self.device , dtype=latents_dtype )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            latents_reference = latents_reference.to(self.device )
            latents = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
            dx = (latents_shape[3] - latents_shape_reference[3]) // 2
            dy = (latents_shape[2] - latents_shape_reference[2]) // 2
            w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
            h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
            tx = 0 if dx < 0 else dx
            ty = 0 if dy < 0 else dy
            dx = max(-dx , 0 )
            dy = max(-dy , 0 )
            latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(UpperCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
# expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
# perform guidance
if do_classifier_free_guidance:
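                # Hedged note: classifier-free guidance combines the two chunks as
                # eps_uncond + guidance_scale * (eps_text - eps_uncond); a scale > 1
                # pushes the denoising direction toward the text condition.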
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
        latents = 1 / 0.1_8_2_1_5 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image ) , return_tensors='pt' ).to(
                self.device )
            image , has_nsfw_concept = self.safety_checker(
                images=image , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            has_nsfw_concept = None
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=has_nsfw_concept )
| 697 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase ="naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        self.processor = DonutProcessor.from_pretrained(CHECKPOINT )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
        expected_json = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
        sequence = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
        actual_json = self.processor.tokenajson(sequence )
        self.assertDictEqual(actual_json , expected_json )
| 697 | 1 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _snake_case :
"""simple docstring"""
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_token_type_ids=True , use_input_mask=True , use_labels=True , use_mc_token_ids=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ) -> Optional[int]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def __SCREAMING_SNAKE_CASE ( self , config , input_ids , input_mask , head_mask , token_type_ids , *UpperCAmelCase__ ) -> int:
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()
        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def __SCREAMING_SNAKE_CASE ( self , config , input_ids , input_mask , head_mask , token_type_ids , *UpperCAmelCase__ ) -> Tuple:
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
        return config, inputs_dict
    def __SCREAMING_SNAKE_CASE ( self , config , input_ids , head_mask , token_type_ids , *UpperCAmelCase__ ) -> Optional[Any]:
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
_UpperCamelCase = (CTRLLMHeadModel,) if is_torch_available() else ()
_UpperCamelCase = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = False
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> str:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
pass
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> str:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@require_torch
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        model = CTRLLMHeadModel.from_pretrained('ctrl' )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[1_1859, 0, 1611, 8]] , dtype=torch.long , device=torch_device )  # Legal the president is
        expected_output_ids = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 697 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _snake_case :
"""simple docstring"""
    def __init__( self , parent , out_indices=None , stage_names=None , out_features=None , backbone="resnet50" , batch_size=3 , image_size=32 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ) -> Optional[Any]:
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
def __SCREAMING_SNAKE_CASE ( self ) -> str:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
return config, pixel_values
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def __SCREAMING_SNAKE_CASE ( self , config , pixel_values ) -> List[str]:
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (TimmBackbone,) if is_torch_available() else ()
_UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config )
            modified_config.out_indices = None
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config )
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
| 697 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__lowerCAmelCase =logging.get_logger(__name__)
class _snake_case ( snake_case ):
"""simple docstring"""
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> None:
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.' , FutureWarning , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 697 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ) -> List[Any]:
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'apply_ocr' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
# with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
        image = Image.open(ds[0]['file'] ).convert('RGB' )
        encoding = image_processing(image , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 697 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = ["image_processor", "tokenizer"]
_UpperCamelCase = "AutoImageProcessor"
_UpperCamelCase = "AutoTokenizer"
    def __init__( self , image_processor , tokenizer ) -> Tuple:
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **UpperCAmelCase__ ) -> Dict:
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **UpperCAmelCase__ )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **UpperCAmelCase__ )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def __SCREAMING_SNAKE_CASE ( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> Optional[int]:
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> str:
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
return ["input_ids", "attention_mask", "pixel_values"]
| 697 |
'''simple docstring'''
import math
def is_prime ( number ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution ( nth = 1_0_0_0_1 ) -> int:
"""simple docstring"""
try:
        nth = int(nth )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
    primes = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
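# Hedged check: the first six primes are 2, 3, 5, 7, 11, 13, so solution(6) == 13;
# the default solution() answers Project Euler 7 with the 10001st prime, 104743.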
if __name__ == "__main__":
print(f'''{solution() = }''')
| 697 | 1 |
'''simple docstring'''
def naive_pattern_search ( s , pattern ) -> list:
    """simple docstring"""
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
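# Hedged note: this is the brute-force O(len(s) * len(pattern)) scan; every
# alignment i is compared character by character. For the checks below, "DE"
# starts at index 3 of "ABCDEFG" and "ABC" occurs at indices 4, 10 and 18.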
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 697 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        self.block_size = 10
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        sequence = [1, 2, 3, 4]
        expected = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _ , summary_lines = process_story(raw_story )
        self.assertEqual(summary_lines , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
        raw_story = ''
        story_lines , summary_lines = process_story(raw_story )
        self.assertEqual(story_lines , [] )
        self.assertEqual(summary_lines , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines , summary_lines = process_story(raw_story )
        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines , story_lines )
        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines , summary_lines )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        sequence = torch.tensor([1, 2, 3, 4] )
        expected = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(sequence , 0 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 23 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 1 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        result = compute_token_type_ids(batch , separator )
        np.testing.assert_array_equal(result , expected )
| 697 | 1 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash ( tokens ) -> Optional[MinHash]:
    """simple docstring"""
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens ( code ) -> Set[str]:
    """simple docstring"""
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class _snake_case :
"""simple docstring"""
    def __init__( self , * , duplication_jaccard_threshold = 0.8_5 ) -> Dict:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key , min_hash ) -> None:
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(UpperCAmelCase__ , 'w' ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash ( element ):
    """simple docstring"""
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter ( dataset_iterator ):
    """simple docstring"""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
            if data is not None:
                yield data
def make_duplicate_clusters ( dataset_iterator , jaccard_threshold ):
    """simple docstring"""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=1_0_0 ) ):
        di.add(filename , min_hash )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def jaccard_similarity ( codea , codeb ) -> float:
    """simple docstring"""
    tokensa = get_tokens(codea )
    tokensb = get_tokens(codeb )
    return len(tokensa & tokensb ) / len(tokensa | tokensb )
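# Hedged check: jaccard_similarity("a = 1", "a = 2") shares the token {"a"}
# out of the union {"a", "1", "2"}, i.e. 1 / 3.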
__lowerCAmelCase =None
def _find_cluster_extremes_shared ( cluster , jaccard_threshold ) -> list:
    """simple docstring"""
    extremes = []
    for elementa in cluster:
        codea = _shared_dataset[elementa['base_index']]['content']
        for elementb in extremes:
            codeb = _shared_dataset[elementb['base_index']]['content']
            if jaccard_similarity(codea , codeb ) >= jaccard_threshold:
                elementb["copies"] += 1
                break
        else:
            elementa["copies"] = 1
            extremes.append(elementa )
    return extremes
def find_extremes ( cluster_list , dataset , jaccard_threshold ) -> list:
    """simple docstring"""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def a ( dataset , jaccard_threshold = 0.8_5 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """simple docstring"""
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element["is_extreme"]:
                element['copies'] = extreme_dict[element['base_index']]['copies']
    print(F'''Original dataset size: {len(dataset )}''' )
    print(F'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
    print(F'''Files in duplicate cluster: {len(duplicate_indices )}''' )
    print(F'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
    print(F'''Filtered dataset size: {len(ds_filter )}''' )
    return ds_filter, duplicate_clusters
| 697 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class _snake_case ( unittest.TestCase , snake_case ):
"""simple docstring"""
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        self.tool = load_tool('text-question-answering' )
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering' , remote=True )
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        result = self.tool(TEXT , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        result = self.remote_tool(TEXT , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        result = self.tool(text=TEXT , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )
    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        result = self.remote_tool(text=TEXT , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )
| 697 | 1 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def a ( x , mu = 0.0 , sigma = 1.0 ) -> float:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
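# Hedged check: at x == mu the density is 1 / sqrt(2 * pi * sigma**2), so for
# the standard normal a(0) is about 0.3989.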
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =[
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _snake_case ( snake_case ):
"""simple docstring"""
@staticmethod
@abstractmethod
def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> Optional[Any]:
raise NotImplementedError()
@abstractmethod
def __SCREAMING_SNAKE_CASE ( self ) -> int:
raise NotImplementedError()
| 697 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "vit"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig ( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
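# Minimal usage sketch (assumed, mirroring the defaults above; commented out
# because this module only imports cleanly inside the transformers package):
# config = ViTConfig()
# assert config.hidden_size == 768 and config.num_hidden_layers == 12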
| 697 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__lowerCAmelCase =logging.get_logger(__name__)
if is_vision_available():
import PIL
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = ["pixel_values"]
def __init__( self , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = PILImageResampling.BICUBIC , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = True , UpperCAmelCase__ = 1 / 255 , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = True , **UpperCAmelCase__ , ) -> None:
super().__init__(**UpperCAmelCase__ )
a_ = size if size is not None else {'shortest_edge': 224}
a_ = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
a_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
a_ = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ , param_name='crop_size' )
a_ = do_resize
a_ = size
a_ = resample
a_ = do_center_crop
a_ = crop_size
a_ = do_rescale
a_ = rescale_factor
a_ = do_normalize
a_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a_ = image_std if image_std is not None else OPENAI_CLIP_STD
a_ = do_convert_rgb
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = PILImageResampling.BICUBIC , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> np.ndarray:
a_ = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
a_ = get_resize_output_image_size(UpperCAmelCase__ , size=size['shortest_edge'] , default_to_square=UpperCAmelCase__ )
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> np.ndarray:
a_ = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(UpperCAmelCase__ , size=(size['height'], size['width']) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> Union[str, Any]:
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> np.ndarray:
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = ChannelDimension.FIRST , **UpperCAmelCase__ , ) -> PIL.Image.Image:
a_ = do_resize if do_resize is not None else self.do_resize
a_ = size if size is not None else self.size
a_ = get_size_dict(UpperCAmelCase__ , param_name='size' , default_to_square=UpperCAmelCase__ )
a_ = resample if resample is not None else self.resample
a_ = do_center_crop if do_center_crop is not None else self.do_center_crop
a_ = crop_size if crop_size is not None else self.crop_size
a_ = get_size_dict(UpperCAmelCase__ , param_name='crop_size' , default_to_square=UpperCAmelCase__ )
a_ = do_rescale if do_rescale is not None else self.do_rescale
a_ = rescale_factor if rescale_factor is not None else self.rescale_factor
a_ = do_normalize if do_normalize is not None else self.do_normalize
a_ = image_mean if image_mean is not None else self.image_mean
a_ = image_std if image_std is not None else self.image_std
a_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a_ = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a_ = [convert_to_rgb(UpperCAmelCase__ ) for image in images]
# All transformations expect numpy arrays.
a_ = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
a_ = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
a_ = [self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
if do_rescale:
a_ = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
a_ = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
a_ = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
a_ = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
| 697 |
'''simple docstring'''
def solution ( length = 5_0 ) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
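# Worked check (mirroring the example stated in Project Euler problem 114):
# a row of length 7 admits exactly 17 arrangements.
assert solution(7 ) == 17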
if __name__ == "__main__":
print(f'''{solution() = }''')
| 697 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ = None ) -> None:
if components is None:
a_ = []
a_ = list(UpperCAmelCase__ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(UpperCAmelCase__ , self.__components ) ) + ")"
def __add__( self , UpperCAmelCase__ ) -> Vector:
a_ = len(self )
if size == len(UpperCAmelCase__ ):
a_ = [self.__components[i] + other.component(UpperCAmelCase__ ) for i in range(UpperCAmelCase__ )]
return Vector(UpperCAmelCase__ )
else:
raise Exception('must have the same size' )
def __sub__( self , UpperCAmelCase__ ) -> Vector:
a_ = len(self )
if size == len(UpperCAmelCase__ ):
a_ = [self.__components[i] - other.component(UpperCAmelCase__ ) for i in range(UpperCAmelCase__ )]
return Vector(UpperCAmelCase__ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , UpperCAmelCase__ ) -> Vector:
...
@overload
def __mul__( self , UpperCAmelCase__ ) -> float:
...
def __mul__( self , UpperCAmelCase__ ) -> float | Vector:
if isinstance(UpperCAmelCase__ , (float, int) ):
a_ = [c * other for c in self.__components]
return Vector(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(self ) == len(UpperCAmelCase__ ):
a_ = len(self )
a_ = [self.__components[i] * other.component(UpperCAmelCase__ ) for i in range(UpperCAmelCase__ )]
return sum(UpperCAmelCase__ )
else: # error case
raise Exception('invalid operand!' )
def __SCREAMING_SNAKE_CASE ( self ) -> Vector:
return Vector(self.__components )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> float:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
a_ = value
def __SCREAMING_SNAKE_CASE ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
a_ = [c**2 for c in self.__components]
return math.sqrt(sum(UpperCAmelCase__ ) )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = False ) -> float:
a_ = self * other
a_ = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def a ( _UpperCAmelCase ) -> Vector:
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
return Vector([0] * dimension )
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Vector:
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (isinstance(_UpperCAmelCase , _UpperCAmelCase ))
a_ = [0] * dimension
a_ = 1
return Vector(_UpperCAmelCase )
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Vector:
"""simple docstring"""
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (isinstance(_UpperCAmelCase , (int, float) ))
)
return x * scalar + y
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Vector:
"""simple docstring"""
random.seed(_UpperCAmelCase )
a_ = [random.randint(_UpperCAmelCase , _UpperCAmelCase ) for _ in range(_UpperCAmelCase )]
return Vector(_UpperCAmelCase )
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> None:
a_ = matrix
a_ = w
a_ = h
def __str__( self ) -> str:
a_ = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , UpperCAmelCase__ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
a_ = []
for i in range(self.__height ):
a_ = [
self.__matrix[i][j] + other.component(UpperCAmelCase__ , UpperCAmelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCAmelCase__ )
return Matrix(UpperCAmelCase__ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , UpperCAmelCase__ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
a_ = []
for i in range(self.__height ):
a_ = [
self.__matrix[i][j] - other.component(UpperCAmelCase__ , UpperCAmelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCAmelCase__ )
return Matrix(UpperCAmelCase__ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , UpperCAmelCase__ ) -> Matrix:
...
@overload
def __mul__( self , UpperCAmelCase__ ) -> Vector:
...
def __mul__( self , UpperCAmelCase__ ) -> Vector | Matrix:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): # matrix-vector
if len(UpperCAmelCase__ ) == self.__width:
a_ = zero_vector(self.__height )
for i in range(self.__height ):
a_ = [
self.__matrix[i][j] * other.component(UpperCAmelCase__ )
for j in range(self.__width )
]
ans.change_component(UpperCAmelCase__ , sum(UpperCAmelCase__ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(UpperCAmelCase__ , (int, float) ): # matrix-scalar
a_ = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(UpperCAmelCase__ , self.__width , self.__height )
return None
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.__height
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.__width
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
a_ = value
else:
raise Exception('change_component: indices out of bounds' )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
a_ = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCAmelCase__ ) ):
a_ = minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCAmelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCAmelCase__ , UpperCAmelCase__ )
else:
raise Exception('Indices out of bounds' )
def __SCREAMING_SNAKE_CASE ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
a_ = [
self.__matrix[0][y] * self.cofactor(0 , UpperCAmelCase__ ) for y in range(self.__width )
]
return sum(UpperCAmelCase__ )
def a ( _UpperCAmelCase ) -> Matrix:
"""simple docstring"""
a_ = [[0] * n for _ in range(_UpperCAmelCase )]
return Matrix(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Matrix:
"""simple docstring"""
random.seed(_UpperCAmelCase )
a_ = [
[random.randint(_UpperCAmelCase , _UpperCAmelCase ) for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )
]
return Matrix(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
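# Worked check of the two-vector __mul__ (dot product) modelled above, done on
# plain lists with assumed values so it runs independently of the classes:
u , v = [1, 2, 3], [3, 2, 1]
assert sum(ui * vi for ui, vi in zip(u , v ) ) == 10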
| 697 |
'''simple docstring'''
def nor_gate ( input_a , input_b ) -> int:
    """simple docstring"""
    return int(input_a == input_b == 0 )
def main ( ) -> None:
    """simple docstring"""
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 697 | 1 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = BarthezTokenizer
_UpperCamelCase = BarthezTokenizerFast
_UpperCamelCase = True
_UpperCamelCase = True
    def setUp( self ) -> None:
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer
    def test_convert_token_and_id( self ) -> None:
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 10_1122 )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
    def test_prepare_batch( self ) -> None:
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 3018, 7_0307, 91, 2]
        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors='pt' )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result )
    def test_rust_and_python_full_tokenizers( self ) -> None:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
# fmt: off
a_ = {'input_ids': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
a_ = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=UpperCAmelCase__ , )
| 697 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory ( args ):
    """simple docstring"""
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand ( BaseTransformersCLICommand ):
    """simple docstring"""
    @staticmethod
    def register_subcommand( parser ) -> None:
        download_parser = parser.add_parser('download' )
        download_parser.add_argument(
            '--cache-dir' , type=str , default=None , help='Path to location to store the models' )
        download_parser.add_argument(
            '--force' , action='store_true' , help='Force the model to be downloaded even if already in cache-dir' )
        download_parser.add_argument(
            '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
        download_parser.add_argument('model' , type=str , help='Name of the model to download' )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , model , cache , force , trust_remote_code ) -> None:
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ) -> None:
        from ..models.auto import AutoModel, AutoTokenizer
        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 697 | 1 |
'''simple docstring'''
class _snake_case :
"""simple docstring"""
def __init__( self ) -> List[str]:
a_ = 0
a_ = 0
a_ = {}
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Tuple:
if vertex not in self.adjacency:
a_ = {}
self.num_vertices += 1
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]:
self.add_vertex(UpperCAmelCase__ )
self.add_vertex(UpperCAmelCase__ )
if head == tail:
return
a_ = weight
a_ = weight
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ = self.get_edges()
for edge in edges:
a_ , a_ , a_ = edge
edges.remove((tail, head, weight) )
for i in range(len(UpperCAmelCase__ ) ):
a_ = list(edges[i] )
edges.sort(key=lambda UpperCAmelCase__ : e[2] )
for i in range(len(UpperCAmelCase__ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
a_ = edges[i][2] + 1
for edge in edges:
a_ , a_ , a_ = edge
a_ = weight
a_ = weight
def __str__( self ) -> Dict:
a_ = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
a_ = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip('\n' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.adjacency.keys()
@staticmethod
def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__=None , UpperCAmelCase__=None ) -> Any:
a_ = Graph()
if vertices is None:
a_ = []
if edges is None:
a_ = []
for vertex in vertices:
g.add_vertex(UpperCAmelCase__ )
for edge in edges:
g.add_edge(*UpperCAmelCase__ )
return g
class _snake_case :
"""simple docstring"""
def __init__( self ) -> str:
a_ = {}
a_ = {}
def __len__( self ) -> Dict:
return len(self.parent )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[int]:
if item in self.parent:
return self.find(UpperCAmelCase__ )
a_ = item
a_ = 0
return item
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[int]:
if item not in self.parent:
return self.make_set(UpperCAmelCase__ )
if item != self.parent[item]:
a_ = self.find(self.parent[item] )
return self.parent[item]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> str:
a_ = self.find(UpperCAmelCase__ )
a_ = self.find(UpperCAmelCase__ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
a_ = roota
return roota
if self.rank[roota] < self.rank[roota]:
a_ = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
a_ = roota
return roota
return None
@staticmethod
def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> Any:
a_ = graph.num_vertices
a_ = Graph.UnionFind()
a_ = []
while num_components > 1:
a_ = {}
for vertex in graph.get_vertices():
a_ = -1
a_ = graph.get_edges()
for edge in edges:
a_ , a_ , a_ = edge
edges.remove((tail, head, weight) )
for edge in edges:
a_ , a_ , a_ = edge
a_ = union_find.find(UpperCAmelCase__ )
a_ = union_find.find(UpperCAmelCase__ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
a_ = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
a_ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
a_ , a_ , a_ = cheap_edge[vertex]
if union_find.find(UpperCAmelCase__ ) != union_find.find(UpperCAmelCase__ ):
union_find.union(UpperCAmelCase__ , UpperCAmelCase__ )
mst_edges.append(cheap_edge[vertex] )
a_ = num_components - 1
a_ = Graph.build(edges=UpperCAmelCase__ )
return mst
| 697 |
'''simple docstring'''
def excel_title_to_column ( column_title ) -> int:
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 6_4) * pow(2_6 , power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
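    # Worked example (assumed values): "AB" maps to 1 * 26 + 2 == 28 under
    # spreadsheet column numbering.
    assert excel_title_to_column('AB' ) == 28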
| 697 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCAmelCase =logging.get_logger(__name__)
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = ["audio_values", "audio_mask"]
def __init__( self , UpperCAmelCase__=2048 , UpperCAmelCase__=1 , UpperCAmelCase__=[16, 16] , UpperCAmelCase__=128 , UpperCAmelCase__=4_4100 , UpperCAmelCase__=86 , UpperCAmelCase__=2048 , UpperCAmelCase__=0.0 , **UpperCAmelCase__ , ) -> Union[str, Any]:
super().__init__(
feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = spectrogram_length
a_ = num_channels
a_ = patch_size
a_ = feature_size // self.patch_size[1]
a_ = n_fft
a_ = sampling_rate // hop_length_to_sampling_rate
a_ = sampling_rate
a_ = padding_value
a_ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCAmelCase__ , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=UpperCAmelCase__ , norm='slaney' , mel_scale='slaney' , ).T
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> np.ndarray:
a_ = spectrogram(
UpperCAmelCase__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
a_ = log_spec[:, :-1]
a_ = log_spec - 2_0.0
a_ = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , **UpperCAmelCase__ , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
a_ = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
a_ = is_batched_numpy or (
isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a_ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ):
a_ = np.asarray(UpperCAmelCase__ , dtype=np.floataa )
elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a_ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
a_ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , UpperCAmelCase__ ):
a_ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
a_ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
a_ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
a_ = np.array(UpperCAmelCase__ ).astype(np.floataa )
# convert into correct format for padding
a_ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
a_ = np.ones([len(UpperCAmelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
a_ = padded_audio_features * self.padding_value
for i in range(len(UpperCAmelCase__ ) ):
a_ = audio_features[i]
a_ = feature
# return as BatchFeature
if return_attention_mask:
a_ = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
a_ = {'audio_values': padded_audio_features}
a_ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
return encoded_inputs
| 697 | 1 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a ( ) -> List[str]:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def a ( ) -> int:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def a ( ) -> Union[str, Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
http_head('https://huggingface.co' )
| 697 |
'''simple docstring'''
def jaccard_similarity ( set_a , set_b , alternative_union=False ):
    """simple docstring"""
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
__lowerCAmelCase ={"a", "b", "c", "d", "e"}
__lowerCAmelCase ={"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
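    # Worked check (values as above): the intersection {"c", "d", "e"} has 3
    # elements and the union has 8, so the printed similarity is 3 / 8 == 0.375.
    assert jaccard_similarity(set_a , set_b ) == 3 / 8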
| 697 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES ={
    "nielsr/canine-s": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE =111_4112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD =0
CLS =0xe_0_0_0
SEP =0xe_0_0_1
BOS =0xe_0_0_2
MASK =0xe_0_0_3
RESERVED =0xe_0_0_4
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS ={
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME ={name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer ( PreTrainedTokenizer ):
    """simple docstring"""
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=2048 , **kwargs , ) -> None:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
    @property
    def vocab_size( self ) -> int:
        return self._unicode_vocab_size
    def _tokenize( self , text ) -> List[str]:
        return list(text )
    def _convert_token_to_id( self , token ) -> int:
        try:
            return ord(token )
        except TypeError:
            raise ValueError(F'''invalid token: \'{token}\'''' )
    def _convert_id_to_token( self , index ) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(F'''invalid id: {index}''' )
    def convert_tokens_to_string( self , tokens ) -> str:
        return "".join(tokens )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_a + sep
        if token_ids_b is not None:
            result += token_ids_b + sep
        return result
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a , token_ids_b , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_b is not None:
            result += ([0] * len(token_ids_b )) + [1]
        return result
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_a + sep ) * [0]
        if token_ids_b is not None:
            result += len(token_ids_b + sep ) * [1]
        return result
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        return ()
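# Minimal illustration of the scheme above (assumed values, standalone check):
# token IDs are raw Unicode codepoints, so _convert_token_to_id is just ord().
assert [ord(ch ) for ch in 'hi'] == [104, 105]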
| 697 |
'''simple docstring'''
def fizz_buzz ( number , iterations ) -> str:
    """simple docstring"""
    if not isinstance(iterations , int ):
        raise ValueError('iterations must be defined as integers' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0' )
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
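    # Quick usage sketch (assumed values): each number emits its token plus a
    # trailing space, so the first seven numbers render as below.
    assert fizz_buzz(1 , 7 ) == '1 2 Fizz 4 Buzz Fizz 7 '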
| 697 | 1 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase =[
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None ) -> Tuple:
a_ = None
a_ = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
a_ = os.path.abspath('examples' )
for item in os.listdir(UpperCAmelCase__ ):
if item not in EXCLUDE_EXAMPLES:
a_ = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
if os.path.isfile(UpperCAmelCase__ ) and ".py" in item_path:
with self.subTest(
tested_script=UpperCAmelCase__ , feature_script=UpperCAmelCase__ , tested_section='main()' if parser_only else 'training_function()' , ):
a_ = compare_against_test(
os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
a_ = '\n'.join(UpperCAmelCase__ )
if special_strings is not None:
for string in special_strings:
a_ = diff.replace(UpperCAmelCase__ , '' )
self.assertEqual(UpperCAmelCase__ , '' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.one_complete_example('complete_nlp_example.py' , UpperCAmelCase__ )
self.one_complete_example('complete_nlp_example.py' , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
a_ = [
' ' * 16 + '{\n\n',
' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 20 + '"f1": eval_metric["f1"],\n\n',
' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '"epoch": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py' , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.one_complete_example('complete_cv_example.py' , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = False
@classmethod
def __SCREAMING_SNAKE_CASE ( cls ) -> Union[str, Any]:
super().setUpClass()
a_ = tempfile.mkdtemp()
a_ = os.path.join(cls._tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
a_ = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def __SCREAMING_SNAKE_CASE ( cls ) -> Dict:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
a_ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
'''.split()
a_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase__ )
self.assertNotIn('epoch 0:' , UpperCAmelCase__ )
self.assertIn('epoch 1:' , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
'''.split()
a_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase__ )
if torch.cuda.is_available():
a_ = torch.cuda.device_count()
else:
a_ = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' , UpperCAmelCase__ )
self.assertIn('epoch 1:' , UpperCAmelCase__ )
else:
self.assertIn('epoch 0:' , UpperCAmelCase__ )
self.assertIn('epoch 1:' , UpperCAmelCase__ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
a_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase__ )
a_ = re.findall('({.+})' , UpperCAmelCase__ )
a_ = [r for r in results if 'accuracy' in r][-1]
a_ = ast.literal_eval(UpperCAmelCase__ )
self.assertGreaterEqual(results['accuracy'] , 0.7_5 )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
a_ = F'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'tracking' ) ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs )
| 697 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream ( AbstractDatasetInputStream ):
    """simple docstring"""
    def __init__( self , generator , features = None , cache_dir = None , keep_in_memory = False , streaming = False , gen_kwargs = None , num_proc = None , **kwargs , ) -> None:
        super().__init__(
            features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.builder = Generator(
            cache_dir=cache_dir , features=features , generator=generator , gen_kwargs=gen_kwargs , **kwargs , )
    def read( self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train' )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split='train' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
| 697 | 1 |
'''simple docstring'''
g =9.8_0665
def archimedes_principle ( fluid_density , volume , gravity = g ) -> float:
"""simple docstring"""
if fluid_density <= 0:
raise ValueError('Impossible fluid density' )
if volume < 0:
raise ValueError('Impossible Object volume' )
if gravity <= 0:
raise ValueError('Impossible Gravity' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
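    # Worked example (assumed values): 0.002 m^3 displaced at 1000 kg/m^3
    # gives a buoyant force of 1000 * 9.80665 * 0.002 == 19.6133 N.
    assert abs(archimedes_principle(1000 , 0.002 ) - 19.6133 ) < 1e-9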
| 697 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowerCAmelCase =numpy.array([0, 0])
__lowerCAmelCase =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase =numpy.array([1, 0])
__lowerCAmelCase =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate ( initial_vectors , steps ) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def iteration_step ( vectors ) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def rotate ( vector , angle_in_degrees ) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees )
    c , s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def plot ( vectors ) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('equal' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates , y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
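    # Rough sanity check (assumed): each step turns every segment into 4, so
    # the 3 initial segments become 3 * 4**5 after 5 steps and the point list
    # returned by iterate() has 3 * 4**5 + 1 == 3073 entries.
    assert len(processed_vectors ) == 3 * 4**5 + 1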
| 697 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """simple docstring"""
    def __init__( self , value ) -> None:
        self.value = value
        self.left = None
        self.right = None
class BinaryTreeNodeSum:
    """simple docstring"""
    def __init__( self , tree ) -> None:
        self.tree = tree
    def depth_first_search( self , node ) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self ) -> Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
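    # Minimal usage sketch (assumed values): a three-node tree rooted at 1
    # with children 2 and 3 sums to 6.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    assert next(iter(BinaryTreeNodeSum(tree ) ) ) == 6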
| 697 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char ( cp ) -> bool:
    """simple docstring"""
    if (
        (cp >= 0X4_e00 and cp <= 0X9_fff)
        or (cp >= 0X3_400 and cp <= 0X4_dbf) #
        or (cp >= 0X20_000 and cp <= 0X2a_6df) #
        or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
        or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
        or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
        or (cp >= 0Xf_900 and cp <= 0Xf_aff)
        or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
    ): #
        return True
    return False
def is_chinese ( word ) -> int:
    """simple docstring"""
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word ( tokens ):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol ( bert_tokens , chinese_word_set ):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            max_len = min(end - start , max_word_len )
            for i in range(max_len , 1 , -1 ):
                whole_word = ''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
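# Worked example (assumed values): with the whole word "中国" known, the
# per-character tokens are re-marked as word continuations, which is what
# whole-word masking needs downstream:
assert add_sub_symbol(['中', '国', '人'] , {'中国'} ) == ['中', '##国', '人']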
def prepare_ref ( lines , ltp_tokenizer , bert_tokenizer ):
    """simple docstring"""
    ltp_res = []
    for i in range(0 , len(lines ) , 1_0_0 ):
        res = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['cws'] ).cws
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 1_0_0 ):
        res = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=True , truncation=True , max_length=5_1_2 )
        bert_res.extend(res['input_ids'] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main ( args ):
    """simple docstring"""
    with open(args.file_name , 'r' , encoding='utf-8' ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , 'w' , encoding='utf-8' ) as f:
        data = [json.dumps(ref ) + '\n' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
__lowerCAmelCase =parser.parse_args()
main(args)
| 697 | 1 |
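The script above builds whole-word-masking reference files for Chinese BERT pretraining: LTP segments each line into words, and BERT word pieces that continue a multi-character Chinese word are re-marked with the "##" subword prefix. A minimal sketch of that marking step, assuming a simplified CJK check (the script tests the full set of extension ranges):

def _is_cjk(char):
    # Simplified: basic CJK Unified Ideographs block only.
    return 0x4E00 <= ord(char) <= 0x9FFF


def add_sub_symbol(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max(len(w) for w in chinese_word_set)
    start, end = 0, len(bert_tokens)
    while start < end:
        single_word = True
        if _is_cjk(bert_tokens[start][0]):
            # Greedy longest match against the LTP word set.
            for i in range(min(end - start, max_word_len), 1, -1):
                if "".join(bert_tokens[start : start + i]) in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_tokens[j] = "##" + bert_tokens[j]
                    start += i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_tokens


print(add_sub_symbol(["中", "国", "人"], {"中国"}))  # ['中', '##国', '人']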
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]:
a_ = size if size is not None else {'height': 18, 'width': 18}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = apply_ocr
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = LayoutLMvaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
# with apply_OCR = True
a_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
a_ = Image.open(ds[0]['file'] ).convert('RGB' )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 697 |
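The test above exercises the LayoutLMv3 image processor (the dataset transform renders the class name as LayoutLMvaImageProcessor). A hedged usage sketch, assuming transformers and Pillow are installed; with apply_ocr=True the call would also need pytesseract and would return words and boxes alongside the pixel values:

from PIL import Image

from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=False)  # skip OCR so Tesseract is not required
image = Image.new("RGB", (640, 480), "white")  # stand-in for a scanned document page
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])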
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase ={
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}
__lowerCAmelCase ={
    "yjernite/retribert-base-uncased": 512,
}
__lowerCAmelCase ={
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class _snake_case ( snake_case ):
    """simple docstring"""
    _UpperCamelCase = VOCAB_FILES_NAMES
    _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
    _UpperCamelCase = RetriBertTokenizer
    _UpperCamelCase = ["input_ids", "attention_mask"]
    def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__="[UNK]" , UpperCAmelCase__="[SEP]" , UpperCAmelCase__="[PAD]" , UpperCAmelCase__="[CLS]" , UpperCAmelCase__="[MASK]" , UpperCAmelCase__=True , UpperCAmelCase__=None , **UpperCAmelCase__ , ) -> int:
        super().__init__(
            UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , )
        a_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , UpperCAmelCase__ ) != do_lower_case
            or normalizer_state.get('strip_accents' , UpperCAmelCase__ ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase__ ) != tokenize_chinese_chars
        ):
            a_ = getattr(UpperCAmelCase__ , normalizer_state.pop('type' ) )
            a_ = do_lower_case
            a_ = strip_accents
            a_ = tokenize_chinese_chars
            a_ = normalizer_class(**UpperCAmelCase__ )
        a_ = do_lower_case
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=None ) -> str:
        a_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]:
        a_ = [self.sep_token_id]
        a_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Tuple[str]:
        a_ = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
        return tuple(UpperCAmelCase__ )
| 697 | 1 |
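The last two methods of the tokenizer above implement the standard BERT input layout: [CLS] A [SEP] for one segment, [CLS] A [SEP] B [SEP] with segment ids 0/1 for a pair. A plain-Python sketch of the same arithmetic, with illustrative token ids:

CLS, SEP = 101, 102  # illustrative ids; real values come from the vocabulary


def build_inputs(ids_a, ids_b=None):
    out = [CLS] + ids_a + [SEP]
    if ids_b:
        out += ids_b + [SEP]
    return out


def token_type_ids(ids_a, ids_b=None):
    if ids_b is None:
        return [0] * (len(ids_a) + 2)  # [CLS] A [SEP] -> all zeros
    return [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)  # B [SEP] -> ones


assert build_inputs([7, 8], [9]) == [101, 7, 8, 102, 9, 102]
assert token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]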
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
    import torch
    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )
@require_torch
class _snake_case ( unittest.TestCase ):
    """simple docstring"""
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[Any]:
        a_ = 3
        a_ = 250
        a_ = ids_tensor((batch_size, length) , UpperCAmelCase__ )
        a_ = torch.ones((batch_size, length) , device=UpperCAmelCase__ , dtype=torch.float ) / length
        return input_ids, scores
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        a_ , a_ = self._get_tensors(5 )
        a_ = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) )
        a_ , a_ = self._get_tensors(9 )
        self.assertFalse(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) )
        a_ , a_ = self._get_tensors(10 )
        self.assertTrue(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) )
    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        a_ = MaxLengthCriteria(max_length=10 )
        a_ , a_ = self._get_tensors(5 )
        self.assertFalse(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) )
        a_ , a_ = self._get_tensors(9 )
        self.assertFalse(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) )
        a_ , a_ = self._get_tensors(10 )
        self.assertTrue(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) )
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        a_ = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        a_ , a_ = self._get_tensors(5 )
        self.assertFalse(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) )
        a_ , a_ = self._get_tensors(9 )
        self.assertFalse(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) )
        a_ , a_ = self._get_tensors(10 )
        self.assertTrue(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) )
        a_ = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        a_ , a_ = self._get_tensors(5 )
        a_ = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) )
        a_ = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) )
    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UpperCAmelCase__ ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        a_ = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(UpperCAmelCase__ ) , 1 )
| 697 |
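Outside the test harness, the criteria above are combined the same way during generation: a StoppingCriteriaList fires as soon as any member criterion is satisfied. A hedged sketch (in the transformers version these tests target, the call returns a single bool; newer releases return a per-sequence tensor):

import torch

from transformers.generation import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList(
    [MaxLengthCriteria(max_length=10), MaxTimeCriteria(max_time=0.1)]
)
input_ids = torch.zeros((3, 10), dtype=torch.long)  # batch of 3, already at length 10
scores = torch.ones((3, 10))
print(criteria(input_ids, scores))  # True: the length criterion fires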
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class _snake_case ( snake_case ):
    """simple docstring"""
    _UpperCamelCase = "lilt"
    def __init__( self , UpperCAmelCase__=3_0522 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=0 , UpperCAmelCase__="absolute" , UpperCAmelCase__=None , UpperCAmelCase__=4 , UpperCAmelCase__=1024 , **UpperCAmelCase__ , ) -> Optional[Any]:
        super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
        a_ = vocab_size
        a_ = hidden_size
        a_ = num_hidden_layers
        a_ = num_attention_heads
        a_ = hidden_act
        a_ = intermediate_size
        a_ = hidden_dropout_prob
        a_ = attention_probs_dropout_prob
        a_ = max_position_embeddings
        a_ = type_vocab_size
        a_ = initializer_range
        a_ = layer_norm_eps
        a_ = position_embedding_type
        a_ = classifier_dropout
        a_ = channel_shrink_ratio
        a_ = max_ad_position_embeddings
| 697 | 1 |
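A minimal usage sketch for the configuration above, assuming the values stored by __init__ match the LiltConfig defaults in transformers:

from transformers import LiltConfig

config = LiltConfig()
print(config.vocab_size, config.hidden_size, config.channel_shrink_ratio)  # 30522 768 4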
'''simple docstring'''
def a ( _UpperCAmelCase ) -> list:
    """simple docstring"""
    if len(_UpperCAmelCase ) < 2:
        return collection
    def circle_sort_util(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> bool:
        a_ = False
        if low == high:
            return swapped
        a_ = low
        a_ = high
        while left < right:
            if collection[left] > collection[right]:
                a_ , a_ = (
                    collection[right],
                    collection[left],
                )
                a_ = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            a_ , a_ = (
                collection[right + 1],
                collection[left],
            )
            a_ = True
        a_ = low + int((high - low) / 2 )
        a_ = circle_sort_util(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        a_ = circle_sort_util(_UpperCAmelCase , mid + 1 , _UpperCAmelCase )
        return swapped or left_swap or right_swap
    a_ = True
    while is_not_sorted is True:
        a_ = circle_sort_util(_UpperCAmelCase , 0 , len(_UpperCAmelCase ) - 1 )
    return collection
if __name__ == "__main__":
    __lowerCAmelCase =input("Enter numbers separated by a comma:\n").strip()
    __lowerCAmelCase =[int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
| 697 |
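A hedged, de-obfuscated sketch of one circle-sort pass from the snippet above: elements mirrored around the centre are compared and swapped, the odd-length middle is compared with its right neighbour, and passes repeat until no swap occurs. On a fully reversed input a single pass already sorts:

def one_pass(data, low, high):
    swapped = False
    left, right = low, high
    while left < right:
        if data[left] > data[right]:
            data[left], data[right] = data[right], data[left]
            swapped = True
        left += 1
        right -= 1
    # Odd-length subarray: compare the middle element with its right neighbour.
    if left == right and right + 1 <= high and data[left] > data[right + 1]:
        data[left], data[right + 1] = data[right + 1], data[left]
        swapped = True
    return swapped


data = [5, 4, 3, 2, 1]
one_pass(data, 0, len(data) - 1)
print(data)  # [1, 2, 3, 4, 5]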
'''simple docstring'''
from __future__ import annotations
def a ( _UpperCAmelCase ) -> bool:
    """simple docstring"""
    a_ = len(_UpperCAmelCase )
    # We need to create a solution grid to record the path.
    a_ = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )]
    a_ = run_maze(_UpperCAmelCase , 0 , 0 , _UpperCAmelCase )
    if solved:
        print('\n'.join(str(_UpperCAmelCase ) for row in solutions ) )
    else:
        print('No solution exists!' )
    return solved
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> bool:
    """simple docstring"""
    a_ = len(_UpperCAmelCase )
    # Final check point.
    if i == j == (size - 1):
        a_ = 1
        return True
    a_ = (not i < 0) and (not j < 0) # Check lower bounds
    a_ = (i < size) and (j < size) # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        a_ = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            a_ = 1
            # check for directions
            if (
                run_maze(_UpperCAmelCase , i + 1 , _UpperCAmelCase , _UpperCAmelCase )
                or run_maze(_UpperCAmelCase , _UpperCAmelCase , j + 1 , _UpperCAmelCase )
                or run_maze(_UpperCAmelCase , i - 1 , _UpperCAmelCase , _UpperCAmelCase )
                or run_maze(_UpperCAmelCase , _UpperCAmelCase , j - 1 , _UpperCAmelCase )
            ):
                return True
            a_ = 0
            return False
    return False
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 697 | 1 |
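A hedged, self-contained sketch of the backtracking idea above (the source's entry-point names are mangled by the dataset transform): 0 is an open cell, 1 is a wall, and the returned grid marks the path from the top-left to the bottom-right corner.

from __future__ import annotations


def solve(maze: list[list[int]]) -> list[list[int]] | None:
    n = len(maze)
    path = [[0] * n for _ in range(n)]

    def walk(i: int, j: int) -> bool:
        if i == j == n - 1:  # reached the exit
            path[i][j] = 1
            return True
        if 0 <= i < n and 0 <= j < n and not path[i][j] and not maze[i][j]:
            path[i][j] = 1  # tentatively claim the cell
            if walk(i + 1, j) or walk(i, j + 1) or walk(i - 1, j) or walk(i, j - 1):
                return True
            path[i][j] = 0  # dead end: backtrack
        return False

    return path if walk(0, 0) else None


maze = [
    [0, 1, 0],
    [0, 1, 0],
    [0, 0, 0],
]
print(solve(maze))  # [[1, 0, 0], [1, 0, 0], [1, 1, 1]]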
'''simple docstring'''
__lowerCAmelCase ={
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of each unit relative to the metre
__lowerCAmelCase ={
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> float:
    """simple docstring"""
    a_ = from_type.lower().strip('s' )
    a_ = to_type.lower().strip('s' )
    a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase )
    a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase )
    if from_sanitized not in METRIC_CONVERSION:
        a_ = (
            F'''Invalid \'from_type\' value: {from_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}'''
        )
        raise ValueError(_UpperCAmelCase )
    if to_sanitized not in METRIC_CONVERSION:
        a_ = (
            F'''Invalid \'to_type\' value: {to_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}'''
        )
        raise ValueError(_UpperCAmelCase )
    a_ = METRIC_CONVERSION[from_sanitized]
    a_ = METRIC_CONVERSION[to_sanitized]
    a_ = 1
    if from_exponent > to_exponent:
        a_ = from_exponent - to_exponent
    else:
        a_ = -(to_exponent - from_exponent)
    return value * pow(1_0 , _UpperCAmelCase )
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 697 | 1 |
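A condensed, de-obfuscated sketch of the conversion above: map both units to their power-of-ten exponent relative to the metre, then scale by the exponent difference. For example, km -> m multiplies by 10**(3 - 0) = 1000.

EXPONENTS = {"m": 0, "km": 3, "Mm": 6, "Gm": 9, "Tm": 12, "Pm": 15, "Em": 18, "Zm": 21, "Ym": 24}


def convert(value, from_symbol, to_symbol):
    return value * 10 ** (EXPONENTS[from_symbol] - EXPONENTS[to_symbol])


assert convert(4, "km", "m") == 4000
assert convert(1, "m", "km") == 0.001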
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=13 , UpperCAmelCase__=3 , UpperCAmelCase__=224 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__=[0.5, 0.5, 0.5] , UpperCAmelCase__=[0.5, 0.5, 0.5] , ) -> List[Any]:
a_ = size if size is not None else {'height': 18, 'width': 18}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = do_normalize
a_ = image_mean
a_ = image_std
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = ViTImageProcessor if is_vision_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = EfficientFormerImageProcessorTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return self.image_proc_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'image_mean' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'image_std' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processor
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
a_ = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
a_ = image_processor(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
# Initialize image_processor
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
a_ = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
a_ = image_processor(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processor
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
a_ = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
a_ = image_processor(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
| 697 |
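The tester above pins image_mean and image_std to 0.5 per channel. Written out, that normalization maps pixel values from [0, 1] to [-1, 1]:

import numpy as np

pixels = np.array([0.0, 0.5, 1.0])  # pixel values after rescaling to [0, 1]
print((pixels - 0.5) / 0.5)  # [-1.  0.  1.]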
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase ="naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
    """simple docstring"""
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ )
    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        a_ = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        a_ = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        a_ = self.processor.tokenajson(UpperCAmelCase__ )
        self.assertDictEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 697 | 1 |
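For reference, tokenajson is the dataset's rendering of DonutProcessor.token2json, which parses Donut's XML-like tag sequence back into nested JSON, with <sep/> separating list items. A hedged usage sketch (downloads the processor from the Hub):

from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
sequence = (
    "<s_name>John Doe</s_name>"
    "<s_nicknames><s_nickname>Johnny</s_nickname>"
    "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
print(processor.token2json(sequence))
# {'name': 'John Doe', 'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}]}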