"""XLNet model configuration"""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
"""ResNet model configuration"""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
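# Illustrative usage (not part of the original file): the ONNX config only
# declares the dynamic axes of `pixel_values`, so an export setup reduces to
# pairing a `ResNetConfig` with its `ResNetOnnxConfig`, roughly:
#
#   config = ResNetConfig()
#   onnx_config = ResNetOnnxConfig(config)
#   list(onnx_config.inputs)  # -> ["pixel_values"]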
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left]..nums[right] by divide and conquer."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
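# Worked example (not in the original file): both bounds are inclusive, so the
# whole list is searched with left=0 and right=len(nums) - 1.
if __name__ == "__main__":
    print(find_max([2, 8, 9, 3], 0, 3))  # 9
    print(find_max([2, 8, 9, 3], 0, 1))  # 8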
import gc
import unittest

import numpy as np
import torch

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    # NOTE: the names of these two opt-out flags were garbled in extraction;
    # the conventional PipelineTesterMixin names are assumed here.
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            # the two boolean values below were garbled in extraction; True/False
            # are assumed, consistent with the "fourier" time embedding used here
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Visual representation of the node and every node that follows it."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from the given sequence and return its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    head = current = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Print the linked list in reverse order, via recursion."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list supporting only 0/1 edge weights (0-1 BFS)."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
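# Minimal usage sketch (not in the original file): 0-weight edges are expanded
# first via appendleft, which is exactly why plain BFS stays valid on 0/1 weights.
if __name__ == "__main__":
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 2, 1)
    print(g.get_shortest_path(0, 2))  # 1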
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    # NOTE: the class name was lost in extraction; "AltCLIPProcessor" is inferred
    # from the CLIPImageProcessor + XLMRoberta tokenizer pairing below.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs."""
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
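# Worked example (not in the original file): in two's complement the sign bit
# survives XOR, so the result is negative exactly when the signs differ.
if __name__ == "__main__":
    print(different_signs(1, -1))  # True
    print(different_signs(72, 24))  # False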
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_snake_case = logging.get_logger(__name__)
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Any = UNetaDModel
a_ : str = 'sample'
@property
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = 4
lowerCamelCase__ = 3
lowerCamelCase__ = (32, 32)
lowerCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.tensor([10] ).to(SCREAMING_SNAKE_CASE__ )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self : int ):
return (3, 32, 32)
@property
def _UpperCamelCase ( self : Any ):
return (3, 32, 32)
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
lowerCamelCase__ = self.dummy_input
return init_dict, inputs_dict
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Any = UNetaDModel
a_ : Dict = 'sample'
@property
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = 4
lowerCamelCase__ = 4
lowerCamelCase__ = (32, 32)
lowerCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.tensor([10] ).to(SCREAMING_SNAKE_CASE__ )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self : Dict ):
return (4, 32, 32)
@property
def _UpperCamelCase ( self : List[str] ):
return (4, 32, 32)
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
lowerCamelCase__ = self.dummy_input
return init_dict, inputs_dict
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def _UpperCamelCase ( self : Tuple ):
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE__ )
model_accelerate.to(SCREAMING_SNAKE_CASE__ )
model_accelerate.eval()
lowerCamelCase__ = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCamelCase__ = noise.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.tensor([10] * noise.shape[0] ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model_accelerate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE__ , low_cpu_mem_usage=SCREAMING_SNAKE_CASE__ )
model_normal_load.to(SCREAMING_SNAKE_CASE__ )
model_normal_load.eval()
lowerCamelCase__ = model_normal_load(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )['sample']
assert torch_all_close(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1e-3 )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCamelCase__ = noise.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.tensor([10] * noise.shape[0] ).to(SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).sample
lowerCamelCase__ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCamelCase__ = torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
# fmt: on
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1e-3 ) )
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : int = UNetaDModel
a_ : int = 'sample'
@property
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple=(32, 32) ):
lowerCamelCase__ = 4
lowerCamelCase__ = 3
lowerCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=SCREAMING_SNAKE_CASE__ )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self : Tuple ):
return (3, 32, 32)
@property
def _UpperCamelCase ( self : Any ):
return (3, 32, 32)
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1e-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
lowerCamelCase__ = self.dummy_input
return init_dict, inputs_dict
@slow
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ , lowerCamelCase__ = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.dummy_input
lowerCamelCase__ = floats_tensor((4, 3) + (2_56, 2_56) ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = noise
lowerCamelCase__ = model(**SCREAMING_SNAKE_CASE__ )
assert image is not None, "Make sure output is not None"
@slow
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 4
lowerCamelCase__ = 3
lowerCamelCase__ = (2_56, 2_56)
lowerCamelCase__ = torch.ones((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.tensor(batch_size * [1e-4] ).to(SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).sample
lowerCamelCase__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCamelCase__ = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
# fmt: on
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1e-2 ) )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 4
lowerCamelCase__ = 3
lowerCamelCase__ = (32, 32)
lowerCamelCase__ = torch.ones((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.tensor(batch_size * [1e-4] ).to(SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).sample
lowerCamelCase__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCamelCase__ = torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] )
# fmt: on
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1e-2 ) )
def _UpperCamelCase ( self : int ):
# not required for this model
pass
"""simple docstring"""
_snake_case = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
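# Sketch (an assumption, not part of the table itself): in transformers'
# setup.py a pin table like this is consumed by a tiny helper that turns
# package names into fully pinned requirement strings, roughly:
#
#   def deps_list(*pkgs):
#       return [deps[pkg] for pkg in pkgs]
#
#   deps_list("torch", "numpy")  # -> ["torch>=1.9,!=1.12.0", "numpy>=1.17"]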
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Least-significant-digit radix sort for non-negative integers."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
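# Worked example (not in the original file): `placement` walks 1, 10, 100, ...
# up to max(nums), bucketing by one digit per pass.
if __name__ == "__main__":
    print(radix_sort([170, 45, 75, 90, 2, 24]))  # [2, 24, 45, 75, 90, 170]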
"""Time Series Transformer model configuration"""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 174 |
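# How the lazy pattern behaves at import time (illustrative, not part of the
# original file): importing the package itself is nearly free, because
# `_LazyModule` replaces this module in `sys.modules` with a proxy that
# resolves names from `_import_structure` on first attribute access, e.g.
#
#   from transformers.models.squeezebert import SqueezeBertConfig
#
# only triggers the real import of `configuration_squeezebert` at that point.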
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""LayoutLMv3FeatureExtractor"""]
lowerCAmelCase = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 174 | 1 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        # (batch, hidden) -> (batch, 1, hidden) so the transformer blocks see a sequence
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        # NOTE: the positional args of BasicTransformerBlock were garbled in
        # extraction; (dim, num_heads, dim_per_head) with attention_bias=True
        # is assumed here.
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
"""Tokenization classes for Ernie-M."""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    # NOTE: SP_CHAR_MAPPING is referenced below but its definition did not
    # survive extraction; it maps special characters to their normalized forms.
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)

        return token_to_idx

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        sentencepiece_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(sentencepiece_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """Schur complement of A in the block matrix [[A, B], [B^T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        # NOTE: the original mismatched shapes were lost in extraction; B is
        # given 2 rows here (vs. A's 3) so that the row check actually fires.
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
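# Worked example (not in the original file), using the identity the first test
# checks: det([[A, B], [B.T, C]]) = det(A) * det(C - B.T @ inv(A) @ B).
#
#   a = np.array([[2.0, 0.0], [0.0, 2.0]])
#   b = np.array([[1.0], [0.0]])
#   c = np.array([[3.0]])
#   schur_complement(a, b, c)  # -> [[2.5]], since 3 - 1 * 0.5 * 1 = 2.5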
| 524 | import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def _lowerCamelCase( __snake_case , __snake_case ) -> Dict:
__snake_case = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def _lowerCamelCase( __snake_case , __snake_case ) -> Dict:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__snake_case = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
__snake_case = in_proj_weight[
: encoder_config.hidden_size, :
]
__snake_case = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__snake_case = in_proj_weight[
-encoder_config.hidden_size :, :
]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict["decoder." + key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
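# Example invocation (assuming this script is saved as
# convert_trocr_unilm_to_pytorch.py, its name in the transformers repo; the
# URL is the default above and the output folder is an arbitrary local path):
#
#     python convert_trocr_unilm_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#         --pytorch_dump_folder_path ./trocr-base-handwritten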
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol."""

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
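# Example key exchange, relying only on the class above: each party feeds the
# other's public key into generate_shared_key and both derive the same secret.
#
#     alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#     shared_a = alice.generate_shared_key(bob.generate_public_key())
#     shared_b = bob.generate_shared_key(alice.generate_public_key())
#     assert shared_a == shared_b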
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
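    # How the expected ids above line up (illustrative): byte-level BPE maps the
    # space before "newer" to "\u0120", the merge rule "e r" -> "er" fuses the
    # final pair, and each resulting piece is looked up in the toy vocabulary
    # written in setUp:
    #
    #     [vocab_tokens.get(t, vocab_tokens["<unk>"]) for t in tokens + ["<unk>"]]
    #     # -> [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]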
    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
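    # In short, the behaviour pinned down above (for text = "hello hello"):
    # with trim_offsets=True the second token's span skips the separating
    # space, (6, 11); with trim_offsets=False the space is included, (5, 11).
    # Once the text itself starts with a space, add_prefix_space decides
    # whether that leading space is folded into the first token's span.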
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Compute the next generation for the given grid of cells."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
        next_generation.append(next_generation_row)
    return next_generation
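# Quick worked example (uses only new_generation above): one step turns the
# vertical blinker into a horizontal bar, as the oscillator should.
#
#     new_generation(BLINKER)  # -> [[0, 0, 0], [1, 1, 1], [0, 0, 0]]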
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Generate one image per generation, starting from the given cells."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
return inputs
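# Minimal usage sketch (values are illustrative; `data_dir` must contain the
# SQuAD json files):
#
#     from transformers import AutoTokenizer
#
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#     batch = train_dataset[0]  # dict of input_ids, attention_mask, ... tensors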
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
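# Stand-alone usage sketch mirroring the integration test above (the model id
# is the one the test uses; the input sentence is arbitrary):
#
#     from transformers import AutoTokenizer, TFConvBertModel
#
#     tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
#     model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#     inputs = tokenizer("Hello world", return_tensors="tf")
#     hidden = model(**inputs).last_hidden_state  # shape (1, seq_len, 768)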
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
snake_case = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
snake_case = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], 
[4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
        expected_boxes = snake_case  # same for the box-coordinate list above
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
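
    # Hedged usage sketch (not part of the original test suite): apply_ocr only
    # toggles the extra OCR outputs, pixel_values are produced either way.
    #
    #     processor = LayoutLMvaImageProcessor()            # apply_ocr=True by default
    #     encoding = processor(image, return_tensors="pt")  # `image` is any PIL image
    #     encoding.pixel_values.shape                       # (1, 3, 224, 224)
    #     encoding.words, encoding.boxes                    # populated only when OCR is on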
| 294 | 0 |
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
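
# Hedged usage sketch: the same conversion driven from Python rather than the CLI.
# The paths below are placeholders, not files that ship with this script:
#
#     convert_tf_checkpoint_to_pytorch(
#         tf_checkpoint_path="bigbird/model.ckpt",
#         big_bird_config_file="bigbird/config.json",
#         pytorch_dump_path="bigbird-pytorch",
#         is_trivia_qa=False,
#     )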
| 549 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, TaForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
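
# Hedged invocation sketch. Flags not defined in this file (--data_dir,
# --output_dir, --do_predict, --model_name_or_path, ...) are assumed to come
# from add_generic_args / BaseTransformer and are illustrative only:
#
#     python finetune.py --data_dir ./cnn_dm --output_dir ./out \
#         --model_name_or_path t5-small --do_predict \
#         --n_val 500 --val_metric rouge2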
| 549 | 1 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x):
    """Convert a byte count to megabytes (the upstream helper is `b2mb`)."""
    return int(x / 2**20)


class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    """Create train/eval `DataLoader`s for a slice of the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 214 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the ranges of shard indices per job; earlier jobs absorb the remainder one shard at a time."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
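
# Worked example (hand-checked against the loop above): 10 shards over 3 jobs
# yield contiguous groups of sizes 4, 3, 3.
#
#     _distribute_shards(num_shards=10, max_num_jobs=3)
#     # -> [range(0, 4), range(4, 7), range(7, 10)]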
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into at most `max_num_jobs` gen_kwargs, sharding only the list-valued entries."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Concatenate the list-valued entries back together; scalars are taken from the first group."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs; lists of the same size get the same shuffling."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
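
# Hedged round-trip example: splitting then merging gen_kwargs preserves the
# original list-valued data sources (values hand-checked):
#
#     gen_kwargs = {"files": ["a", "b", "c", "d"], "sep": ","}
#     _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
#     # -> [{"files": ["a", "b"], "sep": ","}, {"files": ["c", "d"], "sep": ","}]
#     _merge_gen_kwargs(_split_gen_kwargs(gen_kwargs, max_num_jobs=2)) == gen_kwargs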
| 214 | 1 |
'''simple docstring'''
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Get the Next Greatest Element (NGE) for each element by scanning all later elements: O(n^2)."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like next_greatest_element_slow(), but uses enumerate() and a slice instead of index loops."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """Get the NGE for each element in a single reverse pass using a stack: O(n) amortized."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
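
# Worked example (hand-checked): all three implementations agree; the stack
# version is O(n) amortized versus O(n^2) for the nested loops.
#
#     next_greatest_element([2, 7, 3, 5, 4])  # -> [7, -1, 5, -1, -1]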
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 533 |
'''simple docstring'''
def upper(word: str) -> str:
    """Convert an entire string to ASCII uppercase by shifting lowercase letters down 32 code points."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
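
# Worked example (hedged): only ASCII letters a-z are shifted by the 32-point
# offset between 'a' (97) and 'A' (65); everything else passes through unchanged.
#
#     upper("hello WORLD 123")  # -> "HELLO WORLD 123"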
| 533 | 1 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 1_4]),
('2H 5D 3C AS 5S', False, [1_4, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 2_3),
('JH 9H TH KH QH', 2_2),
('JC KH JS JD JH', 2_1),
('KH KC 3S 3H 3D', 2_0),
('8C 9C 5C 3C TC', 1_9),
('JS QS 9H TS KH', 1_8),
('7C 7S KH 2H 7H', 1_7),
('3C KH 5D 5S KH', 1_6),
('QH 8H KD JH 8S', 1_5),
('2D 6D 9D TH 7D', 1_4),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
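
# Hedged usage note: these tests are collected and run with pytest from the
# problem directory, e.g. `pytest -q test_poker_hand.py`. test_compare_random
# draws 100 randomized matchups per run via generate_random_hands(), so its
# inputs differ between runs while the deterministic cases above stay fixed.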
| 650 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def UpperCAmelCase__ ( self : Tuple , scaling_type : Union[str, Any] ):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5 ) )
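# --- illustrative addition (not part of the original test file) ---
# The scaling test above compares a plain model with one whose config sets
# `rope_scaling`. A minimal sketch of what *linear* scaling does to the rotary
# angles; the helper name and the `base` default are assumptions, not taken
# from this file.
import torch

def rope_angles(dim: int, seq_len: int, base: float = 10000.0, linear_factor: float = 1.0) -> torch.Tensor:
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))  # per channel pair
    positions = torch.arange(seq_len).float() / linear_factor  # linear scaling divides positions
    return torch.outer(positions, inv_freq)

# Linear scaling changes the angles even for short inputs, which is why the
# non-dynamic branch above asserts that the short outputs differ.
assert not torch.allclose(rope_angles(64, 10), rope_angles(64, 10, linear_factor=10.0))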
| 650 | 1 |
from __future__ import annotations
import math
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int ) -> list[int]:
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate(n: int ) -> bool:
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def compute_truncated_primes(count: int = 11 ) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution() -> int:
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
    print(f"""{sum(compute_truncated_primes(11)) = }""")
| 117 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowercase_ :
'''simple docstring'''
pass
| 117 | 1 |
'''simple docstring'''
def power(base: int , exponent: int ) -> float:
    return base * power(base , exponent - 1 ) if exponent else 1
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
| 159 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key , default=False ):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no." )
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
lowercase : List[str] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
lowercase : Tuple = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
lowercase : str = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
lowercase : int = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
lowercase : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
lowercase : List[str] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
lowercase : List[str] = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def require_faiss(test_case ):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss" )(test_case )
    return test_case
def require_regex(test_case ):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex" )(test_case )
    return test_case
def require_elasticsearch(test_case ):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch" )(test_case )
    return test_case
def require_sqlalchemy(test_case ):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy" )(test_case )
    return test_case
def require_torch(test_case ):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch" )(test_case )
    return test_case
def require_tf(test_case ):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow" )(test_case )
    return test_case
def require_jax(test_case ):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX" )(test_case )
    return test_case
def require_pil(test_case ):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow" )(test_case )
    return test_case
def require_transformers(test_case ):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers" )(test_case )
    else:
        return test_case
def require_tiktoken(test_case ):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken" )(test_case )
    else:
        return test_case
def require_spacy(test_case ):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy" )(test_case )
    else:
        return test_case
def require_spacy_model(model ):
    def _require_spacy_model(test_case ):
        try:
            import spacy  # noqa F401
            spacy.load(model )
        except ImportError:
            return unittest.skip("test requires spacy" )(test_case )
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model ) )(test_case )
        else:
            return test_case
    return _require_spacy_model
def require_pyspark(test_case ):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark" )(test_case )
    else:
        return test_case
def require_joblibspark(test_case ):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark" )(test_case )
    else:
        return test_case
def slow(test_case ):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow" )(test_case )
    return test_case
def local(test_case ):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local" )(test_case )
    return test_case
def packaged(test_case ):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged" )(test_case )
    return test_case
def remote(test_case ):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote" )(test_case )
    return test_case
def for_all_test_methods(*decorators ):
    def decorate(cls ):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith("test" ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls , name , fn )
        return cls
    return decorate
class RequestWouldHangIndefinitelyError(Exception ):
    """simple docstring"""
    pass
class OfflineSimulationMode(Enum ):
    """simple docstring"""
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1e-16 ):
    online_request = requests.Session().request
    def timeout_request(session , method , url , **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout" ) is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." )
        kwargs["timeout"] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1" , f"OfflineMock[{url}]" ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session , prepared_request , **kwargs ):
        raise requests.ConnectionError("Offline mode is enabled." , request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send" , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request" , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE" , True ):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def set_current_working_directory_to_temp_dir(*args , **kwargs ):
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def __a ( ) -> List[Any]:
import gc
gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __a ( ) -> Dict:
import gc
gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1 , rng2 ):
    return deepcopy(rng1 ).integers(0 , 100 , 10 ).tolist() == deepcopy(rng2 ).integers(0 , 100 , 10 ).tolist()
def xfail_if_500_502_http_error(func ):
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func , *args , **kwargs ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith("500" ) or str(err ).startswith("502" ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper , func )
class _lowerCAmelCase :
"""simple docstring"""
    def __init__( self , returncode , stdout , stderr ):
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback ):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    if echo:
        print("\nRunning: " , " ".join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label="stdout:" ) ),
            _read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label="stderr:" ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}" )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output." )
    return result
def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" )
    worker = re.sub(r"^gw" , "" , worker , 0 , re.M )
    return int(worker )
def get_torch_dist_unique_port():
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
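# --- illustrative usage (not part of the original file) ---
# stdout/stderr come back as lists of rstripped lines collected by `tee` above.
result = execute_subprocess_async(["python", "-c", "print('hello')"], quiet=True)
assert result.returncode == 0
assert result.stdout[0] == "hello"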
| 159 | 1 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 568 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Dict = 'openai/whisper-base'
A : Optional[Any] = (
'This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '
'transcribed text.'
)
A : Dict = 'transcriber'
A : Any = WhisperProcessor
A : Any = WhisperForConditionalGeneration
A : Union[str, Any] = ['audio']
A : Optional[int] = ['text']
    def encode( self , audio ):
        return self.pre_processor(audio , return_tensors="pt" ).input_features
    def forward( self , inputs ):
        return self.model.generate(inputs=inputs )
    def decode( self , outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
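# --- illustrative usage (not part of the original file) ---
# The class name above is obfuscated; in transformers this tool is exposed as
# SpeechToTextTool (import path assumed, worth verifying against your version).
from transformers.tools import SpeechToTextTool

tool = SpeechToTextTool()
transcript = tool("path/to/audio.flac")  # runs encode -> forward -> decode as defined above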
| 568 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = Dict[str, Any]
lowerCAmelCase_ = List[Prediction]
@add_end_docstrings(A__ )
class snake_case_ ( A__ ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""")
requires_backends(self , "vision")
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
    def _sanitize_parameters( self , **kwargs ):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
    def preprocess( self , image ):
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors="pt" )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
        inputs["target_size"] = target_size
        return inputs
    def _forward( self , model_inputs ):
        target_size = model_inputs.pop("target_size" )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({"target_size": target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.9 ):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()
            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 10_00),
                            (height * bbox[1] / 10_00),
                            (width * bbox[2] / 10_00),
                            (height * bbox[3] / 10_00),
                        ] ) )
            scores, classes = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs["bbox"].squeeze(0 )]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
            ]
        return annotation
    def _get_bounding_box( self , box ):
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
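# --- illustrative usage (not part of the original file) ---
# The checkpoint is an assumption; any object-detection model works.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector("street.jpg", threshold=0.9)
# each prediction mirrors the keys built above:
# {"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}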
| 426 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case_ ( A__ ):
"""simple docstring"""
__lowerCAmelCase : Optional[int] ='''xglm'''
__lowerCAmelCase : str =['''past_key_values''']
__lowerCAmelCase : Dict ={
'''num_attention_heads''': '''attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self , vocab_size=25_60_08 , max_position_embeddings=20_48 , d_model=10_24 , ffn_dim=40_96 , num_layers=24 , attention_heads=16 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.0_2 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
lowerCamelCase__ = vocab_size
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = d_model
lowerCamelCase__ = ffn_dim
lowerCamelCase__ = num_layers
lowerCamelCase__ = attention_heads
lowerCamelCase__ = activation_function
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = activation_dropout
lowerCamelCase__ = layerdrop
lowerCamelCase__ = init_std
lowerCamelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase__ = use_cache
super().__init__(
pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , decoder_start_token_id=UpperCamelCase , **UpperCamelCase , )
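# --- illustrative addition (not part of the original file) ---
# `attribute_map` lets the canonical Transformers names alias the XGLM-specific
# ones, so both spellings below read the same underlying value (assuming the
# class is exposed as transformers.XGLMConfig).
from transformers import XGLMConfig

config = XGLMConfig()
assert config.hidden_size == config.d_model
assert config.num_hidden_layers == config.num_layers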
| 426 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class SCREAMING_SNAKE_CASE ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = '''gpt_neo'''
SCREAMING_SNAKE_CASE__ : Any = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : Dict = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self , vocab_size=50257 , max_position_embeddings=2048 , hidden_size=2048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ):
"""simple docstring"""
_snake_case : Tuple = vocab_size
_snake_case : Tuple = max_position_embeddings
_snake_case : Tuple = hidden_size
_snake_case : List[Any] = num_layers
_snake_case : List[Any] = num_heads
_snake_case : int = intermediate_size
_snake_case : Union[str, Any] = window_size
_snake_case : Dict = activation_function
_snake_case : Optional[int] = resid_dropout
_snake_case : Optional[int] = embed_dropout
_snake_case : Optional[int] = attention_dropout
_snake_case : List[Any] = classifier_dropout
_snake_case : Dict = layer_norm_epsilon
_snake_case : Optional[int] = initializer_range
_snake_case : Dict = use_cache
_snake_case : Tuple = bos_token_id
_snake_case : Dict = eos_token_id
_snake_case : List[Any] = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
F"""`config.num_layers = {self.num_layers}`. """
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @staticmethod
    def expand_attention_types_params(attention_types ):
        """simple docstring"""
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold(input , dimension , size , step ):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode='floor' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks(seq_length , window_size ):
    """Custom implementation to compute block length and number of blocks with ONNX-exportable ops."""
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode='floor' )
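# --- illustrative check (not part of the original file) ---
# custom_unfold mirrors torch.Tensor.unfold with ONNX-exportable ops; a quick
# equivalence check on an arbitrary shape.
import torch

x = torch.arange(12.0).view(1, 12)
assert torch.equal(custom_unfold(x, 1, 4, 2), x.unfold(1, 4, 2))  # windows of 4, stride 2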
class SCREAMING_SNAKE_CASE ( lowercase_ ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case : List[Any] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(snake_case , direction='inputs' )
_snake_case : Optional[int] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_snake_case : Union[str, Any] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
return self._config.num_heads
def __UpperCAmelCase ( self : Tuple , snake_case : PreTrainedTokenizer , snake_case : int = -1 , snake_case : int = -1 , snake_case : bool = False , snake_case : Optional[TensorType] = None , ):
"""simple docstring"""
_snake_case : Dict = super(snake_case , self ).generate_dummy_inputs(
snake_case , batch_size=snake_case , seq_length=snake_case , is_pair=snake_case , framework=snake_case )
# We need to order the input in the way they appears in the forward()
_snake_case : str = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case : List[str] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_snake_case : Optional[Any] = seqlen + 2
_snake_case : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_snake_case : Tuple = [
(torch.zeros(snake_case ), torch.zeros(snake_case )) for _ in range(self.num_layers )
]
_snake_case : Optional[int] = common_inputs['attention_mask']
if self.use_past:
_snake_case : str = ordered_inputs['attention_mask'].dtype
_snake_case : Tuple = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(snake_case , snake_case , dtype=snake_case )] , dim=1 )
return ordered_inputs
@property
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
return 13
| 517 |
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
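# --- illustrative sketch (not part of the original file) ---
# A plausible implementation of the availability probe; the real helper lives
# in `.imports` and may differ.
from importlib.util import find_spec

def is_rich_available() -> bool:
    return find_spec("rich") is not None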
| 517 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class __magic_name__ ( lowercase__ ):
_SCREAMING_SNAKE_CASE : List[str] = 'tapas'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=64 , max_num_columns=32 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_sizes
__snake_case = initializer_range
__snake_case = layer_norm_eps
# Fine-tuning task hyperparameters
__snake_case = positive_label_weight
__snake_case = num_aggregation_labels
__snake_case = aggregation_loss_weight
__snake_case = use_answer_as_supervision
__snake_case = answer_loss_importance
__snake_case = use_normalized_answer_loss
__snake_case = huber_loss_delta
__snake_case = temperature
__snake_case = aggregation_temperature
__snake_case = use_gumbel_for_cells
__snake_case = use_gumbel_for_aggregation
__snake_case = average_approximation_function
__snake_case = cell_selection_preference
__snake_case = answer_loss_cutoff
__snake_case = max_num_rows
__snake_case = max_num_columns
__snake_case = average_logits_per_cell
__snake_case = select_one_column
__snake_case = allow_empty_column_selection
__snake_case = init_cell_selection_weights_to_zero
__snake_case = reset_position_index_per_cell
__snake_case = disable_per_token_loss
# Aggregation hyperparameters
__snake_case = aggregation_labels
__snake_case = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
| 614 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class __magic_name__ ( lowercase__ ):
def __init__( self : Tuple , **snake_case_ : str ):
super().__init__(**snake_case_ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , image : Union[str, List[str], "Image", List["Image"]] , **kwargs ):
        return super().__call__(image , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x: -x[0] )
        ]
        return result
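# --- illustrative usage (not part of the original file) ---
# The checkpoint is an assumption; any CLIP-style model works.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
classifier(
    "cat.png",
    candidate_labels=["cat", "dog"],
    hypothesis_template="This is a photo of {}.",  # same default as in preprocess above
)
# -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}]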
| 614 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowerCAmelCase_ = get_tests_dir('''fixtures''')
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Tuple ) ->Optional[int]:
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
snake_case_ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case__( self : Any ) ->str:
# This test is for deprecated behavior and can be removed in v5
snake_case_ = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def snake_case__( self : Union[str, Any] ) ->Union[str, Any]:
with self.assertRaises(_UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case_ = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
snake_case_ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(_UpperCamelCase )
@is_staging_test
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def snake_case__( cls : Optional[int] ) ->Tuple:
snake_case_ = TOKEN
HfFolder.save_token(_UpperCamelCase )
@classmethod
def snake_case__( cls : str ) ->List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def snake_case__( self : Optional[Any] ) ->Union[str, Any]:
snake_case_ = ViTImageProcessor.from_pretrained(_UpperCamelCase )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
snake_case_ = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_UpperCamelCase , repo_id='''test-image-processor''' , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
snake_case_ = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def snake_case__( self : str ) ->List[str]:
snake_case_ = ViTImageProcessor.from_pretrained(_UpperCamelCase )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
snake_case_ = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_UpperCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
snake_case_ = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def snake_case__( self : Tuple ) ->Union[str, Any]:
CustomImageProcessor.register_for_auto_class()
snake_case_ = CustomImageProcessor.from_pretrained(_UpperCamelCase )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
snake_case_ = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=_UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
| 39 |
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def A__ ( A : Optional[int]):
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def A__ ( ):
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def A__ ( ):
'''simple docstring'''
UpperCamelCase : Tuple = "mock-s3-bucket"
UpperCamelCase : List[str] = F'''s3://{mock_bucket}'''
UpperCamelCase : Optional[Any] = extract_path_from_uri(A)
assert dataset_path.startswith("s3://") is False
UpperCamelCase : Any = "./local/path"
UpperCamelCase : str = extract_path_from_uri(A)
assert dataset_path == new_dataset_path
def A__ ( A : Optional[Any]):
'''simple docstring'''
UpperCamelCase : List[Any] = is_remote_filesystem(A)
assert is_remote is True
UpperCamelCase : Tuple = fsspec.filesystem("file")
UpperCamelCase : int = is_remote_filesystem(A)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , A)
def A__ ( A : List[Any] , A : Any , A : str , A : Union[str, Any] , A : List[str] , A : List[Any] , A : Optional[Any]):
'''simple docstring'''
UpperCamelCase : List[str] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
UpperCamelCase : Any = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCamelCase : Any = F'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(A)
UpperCamelCase : List[str] = fsspec.filesystem(compression_fs_class.protocol , fo=A)
assert isinstance(A , A)
UpperCamelCase : List[Any] = os.path.basename(A)
UpperCamelCase : Union[str, Any] = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(A , "r" , encoding="utf-8") as f, open(A , encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"])
def A__ ( A : Optional[int] , A : str , A : Optional[Any]):
'''simple docstring'''
UpperCamelCase : Any = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
UpperCamelCase : str = compressed_file_paths[protocol]
UpperCamelCase : Optional[int] = "dataset.jsonl"
UpperCamelCase : Tuple = F'''{protocol}://{member_file_path}::{compressed_file_path}'''
UpperCamelCase , *UpperCamelCase : Dict = fsspec.get_fs_token_paths(A)
assert fs.isfile(A)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def A__ ( A : Dict , A : List[str] , A : Dict , A : List[Any]):
'''simple docstring'''
UpperCamelCase : Optional[int] = hf_api.dataset_info(A , token=A)
UpperCamelCase : List[str] = HfFileSystem(repo_info=A , token=A)
assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
assert hffs.isdir("data")
assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
with open(A) as f:
assert hffs.open("data/text_data.txt" , "r").read() == f.read()
def A__ ( ):
'''simple docstring'''
UpperCamelCase : str = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(A , A , clobber=A)
with pytest.warns(A) as warning_info:
importlib.reload(datasets.filesystems)
assert len(A) == 1
assert (
str(warning_info[0].message)
== F'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
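# --- illustrative addition (not part of the original test file) ---
# Registering a protocol by hand, as the reload test above exercises; the
# protocol name "mymem" is arbitrary.
import fsspec
from fsspec.implementations.memory import MemoryFileSystem

fsspec.register_implementation("mymem", MemoryFileSystem, clobber=True)
fs = fsspec.filesystem("mymem")
with fs.open("/demo.txt", "w") as f:
    f.write("hello")
assert fs.cat_file("/demo.txt") == b"hello"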
| 173 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] =logging.get_logger(__name__)
_UpperCAmelCase : List[Any] ="""Hello world! cécé herlolip"""
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval() # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
lowerCAmelCase_ : str = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
lowerCAmelCase_ : Tuple = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , A__ )
lowerCAmelCase_ : Union[str, Any] = XLMRobertaXLForSequenceClassification(A__ ) if classification_head else XLMRobertaXLForMaskedLM(A__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCAmelCase_ : Optional[Any] = roberta_sent_encoder.embed_tokens.weight
lowerCAmelCase_ : List[Any] = roberta_sent_encoder.embed_positions.weight
lowerCAmelCase_ : List[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowerCAmelCase_ : str = roberta_sent_encoder.layer_norm.weight
lowerCAmelCase_ : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCAmelCase_ : int = model.roberta.encoder.layer[i]
lowerCAmelCase_ : List[str] = roberta_sent_encoder.layers[i]
lowerCAmelCase_ : Any = layer.attention
lowerCAmelCase_ : Any = roberta_layer.self_attn_layer_norm.weight
lowerCAmelCase_ : str = roberta_layer.self_attn_layer_norm.bias
# self attention
lowerCAmelCase_ : Tuple = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowerCAmelCase_ : Tuple = roberta_layer.self_attn.q_proj.weight
lowerCAmelCase_ : Optional[Any] = roberta_layer.self_attn.q_proj.bias
lowerCAmelCase_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight
lowerCAmelCase_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
lowerCAmelCase_ : List[Any] = roberta_layer.self_attn.v_proj.weight
lowerCAmelCase_ : str = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowerCAmelCase_ : Any = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowerCAmelCase_ : int = roberta_layer.self_attn.out_proj.weight
lowerCAmelCase_ : Dict = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowerCAmelCase_ : Tuple = roberta_layer.final_layer_norm.weight
lowerCAmelCase_ : List[Any] = roberta_layer.final_layer_norm.bias
# intermediate
lowerCAmelCase_ : List[str] = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
lowerCAmelCase_ : Optional[Any] = roberta_layer.fca.weight
lowerCAmelCase_ : List[str] = roberta_layer.fca.bias
# output
lowerCAmelCase_ : Tuple = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
lowerCAmelCase_ : Optional[int] = roberta_layer.fca.weight
lowerCAmelCase_ : Any = roberta_layer.fca.bias
# end of layer
if classification_head:
lowerCAmelCase_ : Tuple = roberta.model.classification_heads['''mnli'''].dense.weight
lowerCAmelCase_ : Optional[int] = roberta.model.classification_heads['''mnli'''].dense.bias
lowerCAmelCase_ : Optional[Any] = roberta.model.classification_heads['''mnli'''].out_proj.weight
lowerCAmelCase_ : Tuple = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCAmelCase_ : Tuple = roberta.model.encoder.lm_head.dense.weight
lowerCAmelCase_ : Tuple = roberta.model.encoder.lm_head.dense.bias
lowerCAmelCase_ : int = roberta.model.encoder.lm_head.layer_norm.weight
lowerCAmelCase_ : Tuple = roberta.model.encoder.lm_head.layer_norm.bias
lowerCAmelCase_ : Union[str, Any] = roberta.model.encoder.lm_head.weight
lowerCAmelCase_ : Optional[Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCAmelCase_ : Optional[Any] = roberta.encode(A__ ).unsqueeze(0 ) # batch of size 1
lowerCAmelCase_ : int = model(A__ )[0]
if classification_head:
lowerCAmelCase_ : Any = roberta.model.classification_heads['''mnli'''](roberta.extract_features(A__ ) )
else:
lowerCAmelCase_ : List[Any] = roberta.model(A__ )[0]
print(our_output.shape , their_output.shape )
lowerCAmelCase_ : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCAmelCase_ : Optional[Any] = torch.allclose(A__ , A__ , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(A__ ).mkdir(parents=A__ , exist_ok=A__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
if __name__ == "__main__":
_UpperCAmelCase : Tuple =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
_UpperCAmelCase : Union[str, Any] =parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 714 |
from __future__ import annotations
def carrier_concentration(electron_conc: float , hole_conc: float , intrinsic_conc: float , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
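# --- illustrative check (not part of the original script) ---
# Mass action law: electron_conc * hole_conc == intrinsic_conc ** 2, so any two
# known concentrations determine the third.
assert carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0) == ("intrinsic_conc", 50.0)
assert carrier_concentration(electron_conc=0, hole_conc=100, intrinsic_conc=50) == ("electron_conc", 25.0)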
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 619 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
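# --- illustrative sketch (not part of the original file) ---
# A simplified version of the lazy-module pattern used above; not the real
# `_LazyModule` implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    """Defer submodule imports until an attribute is actually accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item: str):
        submodule = importlib.import_module(f".{self._class_to_module[item]}", self.__name__)
        return getattr(submodule, item)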
| 43 |
'''simple docstring'''
def print_pascal_triangle(num_rows: int ) -> None:
    '''simple docstring'''
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=" " )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=" " )
            else:
                print(triangle[row_idx][col_idx] , end="" )
        print()
def generate_pascal_triangle(num_rows: int ) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows , int ):
        raise TypeError("The input value of 'num_rows' should be 'int'" )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0" )
    triangle = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle
def populate_current_row(triangle: list[list[int]] , current_row_idx: int ) -> list[int]:
    '''simple docstring'''
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row
def calculate_current_element(triangle: list[list[int]] , current_row: list[int] , current_row_idx: int , current_col_idx: int , ) -> None:
    '''simple docstring'''
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int ) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows , int ):
        raise TypeError("The input value of 'num_rows' should be 'int'" )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0" )
    result = [[1]]
    for row_index in range(1 , num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result
def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}' , setup="import __main__" )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'{call:38} -- {timing:.4f} seconds' )
    for value in range(15 ):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 236 | 0 |
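# A self-contained cross-check for the Pascal construction above: each row must
# equal the binomial coefficients C(n, 0) .. C(n, n).
import math

def pascal_row(n: int) -> list:
    row = [1]
    for k in range(1, n + 1):
        row.append(row[-1] * (n - k + 1) // k)  # C(n, k) from C(n, k - 1)
    return row

for n in range(10):
    assert pascal_row(n) == [math.comb(n, k) for k in range(n + 1)]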
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class _UpperCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
__a : Tuple = 42
class _UpperCamelCase ( UpperCamelCase__ ,UpperCamelCase__ ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCAmelCase__ = 16 , lowerCAmelCase__ = 88 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 32 , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = "geglu" , lowerCAmelCase__ = True , lowerCAmelCase__ = True , ) -> List[Any]:
'''simple docstring'''
super().__init__()
__lowercase = num_attention_heads
__lowercase = attention_head_dim
__lowercase = num_attention_heads * attention_head_dim
__lowercase = in_channels
__lowercase = torch.nn.GroupNorm(num_groups=__A , num_channels=__A , eps=1E-6 , affine=__A )
__lowercase = nn.Linear(__A , __A )
# 3. Define transformers blocks
__lowercase = nn.ModuleList(
[
BasicTransformerBlock(
__A , __A , __A , dropout=__A , cross_attention_dim=__A , activation_fn=__A , attention_bias=__A , double_self_attention=__A , norm_elementwise_affine=__A , )
for d in range(__A )
] )
__lowercase = nn.Linear(__A , __A )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=1 , lowerCAmelCase__=None , lowerCAmelCase__ = True , ) -> List[str]:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = hidden_states.shape
__lowercase = batch_frames // num_frames
__lowercase = hidden_states
__lowercase = hidden_states[None, :].reshape(__A , __A , __A , __A , __A )
__lowercase = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
__lowercase = self.norm(__A )
__lowercase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __A , __A )
__lowercase = self.proj_in(__A )
# 2. Blocks
for block in self.transformer_blocks:
__lowercase = block(
__A , encoder_hidden_states=__A , timestep=__A , cross_attention_kwargs=__A , class_labels=__A , )
# 3. Output
__lowercase = self.proj_out(__A )
__lowercase = (
hidden_states[None, None, :]
.reshape(__A , __A , __A , __A , __A )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
__lowercase = hidden_states.reshape(__A , __A , __A , __A )
__lowercase = hidden_states + residual
if not return_dict:
return (output,)
        return TransformerTemporalModelOutput(sample=__A )
| 715 |
from __future__ import annotations
__a : str = """Muhammad Umer Farooq"""
__a : Optional[Any] = """MIT"""
__a : int = """1.0.0"""
__a : Optional[int] = """Muhammad Umer Farooq"""
__a : Dict = """[email protected]"""
__a : Optional[Any] = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    """simple docstring"""

    def __init__(self, domain: str) -> None:
        '''simple docstring'''
        super().__init__()
        self.urls = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list) -> None:
        '''simple docstring'''
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)

def get_domain_name(url: str) -> str:
    """simple docstring"""
    return ".".join(get_sub_domain_name(url).split('.')[-2:])

def get_sub_domain_name(url: str) -> str:
    """simple docstring"""
    return parse.urlparse(url).netloc

def emails_from_url(url: str = "https://github.com") -> list:
    """simple docstring"""
    domain = get_domain_name(url)
    # Initialize the parser
    parser = Parser(domain)
    try:
        # Open URL
        r = requests.get(url)
        # pass the raw HTML to the parser to get links
        parser.feed(r.text)
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 522 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_UpperCamelCase : int = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    # Name restored by the editor from the processor's behavior (divisor-aligned
    # resize plus 1/255 rescale); the snippet itself left the class name obfuscated.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        do_rescale=None,
        size_divisor=None,
        resample=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image(s)')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
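# A framework-free sketch of what the processor above does per image: snap the
# spatial size down to a multiple of size_divisor, then rescale by 1/255. Cropping
# by slicing stands in for true bilinear resizing and is a simplification.
import numpy as np

def preprocess_image(image: np.ndarray, size_divisor: int = 32) -> np.ndarray:
    height, width = image.shape[:2]
    new_h = height // size_divisor * size_divisor
    new_w = width // size_divisor * size_divisor
    resized = image[:new_h, :new_w]  # stand-in for interpolation-based resizing
    return resized.astype(np.float32) / 255.0

out = preprocess_image(np.random.randint(0, 256, (37, 65, 3), dtype=np.uint8))
print(out.shape, out.dtype)  # (32, 64, 3) float32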
| 599 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_UpperCamelCase : Any = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 599 | 1 |
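# The shim above is the standard rename-with-deprecation pattern: the old class
# becomes a thin subclass of the new one that only warns. A generic sketch, with
# both class names invented for illustration:
import warnings

class NewProcessor:
    def __init__(self, scale: float = 1.0) -> None:
        self.scale = scale

class OldProcessor(NewProcessor):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)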
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """simple docstring"""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class _UpperCAmelCase ( unittest.TestCase ):
    def test_primes(self) -> None:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(1_1 ) )
self.assertTrue(is_prime(1_3 ) )
self.assertTrue(is_prime(1_7 ) )
self.assertTrue(is_prime(1_9 ) )
self.assertTrue(is_prime(2_3 ) )
self.assertTrue(is_prime(2_9 ) )
    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
is_prime(-1_9 )
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 183 |
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
"""simple docstring"""
return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
"""simple docstring"""
return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
"""simple docstring"""
return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
"""simple docstring"""
return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 183 | 1 |
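# A self-contained spot check of the bit operations defined above, on
# number = 0b1010 (decimal 10):
number = 0b1010
assert number | (1 << 0) == 0b1011       # set_bit(10, 0)    -> 11
assert number & ~(1 << 1) == 0b1000      # clear_bit(10, 1)  -> 8
assert number ^ (1 << 3) == 0b0010       # flip_bit(10, 3)   -> 2
assert ((number >> 3) & 1) == 1          # is_bit_set(10, 3) -> True
assert int(number & (1 << 0) != 0) == 0  # get_bit(10, 0)    -> 0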
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : str = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _UpperCAmelCase['modeling_swinv2'] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _UpperCAmelCase, module_spec=__spec__)
| 683 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCamelCase : Any = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def _lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def _lowercase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
_UpperCamelCase : int = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCamelCase : int = DDPMScheduler()
_UpperCamelCase : Optional[int] = AudioDiffusionPipeline(vqvae=_snake_case , unet=self.dummy_unet , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case , steps=4 )
_UpperCamelCase : Union[str, Any] = output.audios[0]
_UpperCamelCase : Union[str, Any] = output.images[0]
_UpperCamelCase : str = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : int = pipe(generator=_snake_case , steps=4 , return_dict=_snake_case )
_UpperCamelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCamelCase : List[str] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : str = self.dummy_vqvae_and_unet
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCamelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Tuple = pipe(raw_audio=_snake_case , generator=_snake_case , start_step=5 , steps=10 )
_UpperCamelCase : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCamelCase : Any = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Tuple = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : Any = self.dummy_unet_condition
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_snake_case , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : int = torch.rand((1, 1, 10) )
_UpperCamelCase : Optional[Any] = pipe(generator=_snake_case , encoding=_snake_case )
_UpperCamelCase : Dict = output.images[0]
_UpperCamelCase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = torch_device
_UpperCamelCase : int = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
_UpperCamelCase : str = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case )
_UpperCamelCase : List[Any] = output.audios[0]
_UpperCamelCase : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCamelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 683 | 1 |
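# The tests above pin outputs with torch.Generator(...).manual_seed; a tiny
# demonstration that a seeded generator makes random draws reproducible:
import torch

g1 = torch.Generator().manual_seed(42)
g2 = torch.Generator().manual_seed(42)
assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))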
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : Tuple = {"""vocab_file""": """sentencepiece.bpe.model"""}
snake_case_ : List[str] = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
}
}
snake_case_ : List[Any] = {
"""camembert-base""": 5_1_2,
}
snake_case_ : Any = """▁"""
class snake_case__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask''']
def __init__( self : int , lowercase : Union[str, Any] , lowercase : str="<s>" , lowercase : str="</s>" , lowercase : Optional[int]="</s>" , lowercase : Dict="<s>" , lowercase : Optional[Any]="<unk>" , lowercase : List[Any]="<pad>" , lowercase : Any="<mask>" , lowercase : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] , lowercase : Optional[Dict[str, Any]] = None , **lowercase : Dict , ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase ) )
UpperCAmelCase : str = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
UpperCAmelCase : Union[str, Any] = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
UpperCAmelCase : Union[str, Any] = len(self.fairseq_tokens_to_ids )
UpperCAmelCase : Dict = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
UpperCAmelCase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __lowerCAmelCase ( self : Union[str, Any] , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Dict = [self.cls_token_id]
UpperCAmelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self : Dict , lowercase : List[int] , lowercase : Optional[List[int]] = None , lowercase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
if token_ids_a is None:
return [1] + ([0] * len(lowercase )) + [1]
return [1] + ([0] * len(lowercase )) + [1, 1] + ([0] * len(lowercase )) + [1]
def __lowerCAmelCase ( self : Tuple , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = [self.sep_token_id]
UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase : List[Any] = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : Optional[Any] , lowercase : str ):
'''simple docstring'''
return self.sp_model.encode(lowercase , out_type=lowercase )
def __lowerCAmelCase ( self : int , lowercase : Optional[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(lowercase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(lowercase )
def __lowerCAmelCase ( self : Any , lowercase : Dict ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self : Tuple , lowercase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase : Tuple = []
UpperCAmelCase : Tuple = ""
UpperCAmelCase : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase ) + token
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : str = []
else:
current_sub_tokens.append(lowercase )
UpperCAmelCase : Any = False
out_string += self.sp_model.decode(lowercase )
return out_string.strip()
def __getstate__( self : str ):
'''simple docstring'''
UpperCAmelCase : List[str] = self.__dict__.copy()
UpperCAmelCase : Dict = None
return state
def __setstate__( self : List[str] , lowercase : str ):
'''simple docstring'''
UpperCAmelCase : List[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase : Any = {}
UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : Optional[Any] , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase : Optional[Any] = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , "wb" ) as fi:
UpperCAmelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
| 292 |
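# The tokenizer above shifts every SentencePiece id by a fixed fairseq offset so
# the control tokens keep the low ids. A toy sketch of that remapping; the
# vocabulary contents below are invented for illustration.
FAIRSEQ_TOKENS_TO_IDS = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
FAIRSEQ_OFFSET = len(FAIRSEQ_TOKENS_TO_IDS)
SP_VOCAB = {"▁the": 5, "▁cat": 9}  # pretend SentencePiece piece -> id

def token_to_id(token: str) -> int:
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    sp_id = SP_VOCAB.get(token, 0)  # SentencePiece returns 0 for unknown pieces
    if sp_id == 0:
        return FAIRSEQ_TOKENS_TO_IDS["<unk>"]
    return FAIRSEQ_OFFSET + sp_id

print(token_to_id("▁cat"), token_to_id("<pad>"), token_to_id("missing"))  # 13 1 3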
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class snake_case__ :
SCREAMING_SNAKE_CASE__ = BlenderbotSmallConfig
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = '''gelu'''
def __init__( self : Tuple , lowercase : Any , lowercase : Optional[Any]=13 , lowercase : Any=7 , lowercase : List[Any]=True , lowercase : List[str]=False , lowercase : Optional[Any]=99 , lowercase : Union[str, Any]=32 , lowercase : List[Any]=2 , lowercase : Tuple=4 , lowercase : Union[str, Any]=37 , lowercase : str=0.1 , lowercase : Optional[Any]=0.1 , lowercase : Optional[Any]=20 , lowercase : str=2 , lowercase : int=1 , lowercase : Optional[int]=0 , ):
'''simple docstring'''
UpperCAmelCase : Dict = parent
UpperCAmelCase : Union[str, Any] = batch_size
UpperCAmelCase : Tuple = seq_length
UpperCAmelCase : Dict = is_training
UpperCAmelCase : str = use_labels
UpperCAmelCase : Union[str, Any] = vocab_size
UpperCAmelCase : Tuple = hidden_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : Union[str, Any] = num_attention_heads
UpperCAmelCase : Dict = intermediate_size
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase : Dict = max_position_embeddings
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : List[Any] = pad_token_id
UpperCAmelCase : Union[str, Any] = bos_token_id
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : int = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase : Optional[Any] = prepare_blenderbot_small_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def __lowerCAmelCase ( self : int , lowercase : List[Any] , lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase : List[Any] = TFBlenderbotSmallModel(config=lowercase ).get_decoder()
UpperCAmelCase : List[Any] = inputs_dict["input_ids"]
UpperCAmelCase : int = input_ids[:1, :]
UpperCAmelCase : Union[str, Any] = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase : Union[str, Any] = inputs_dict["head_mask"]
UpperCAmelCase : List[Any] = 1
# first forward pass
UpperCAmelCase : Optional[Any] = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )
UpperCAmelCase , UpperCAmelCase : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase : Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase : Optional[Any] = model(lowercase , attention_mask=lowercase )[0]
UpperCAmelCase : List[str] = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase : List[str] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase : Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def lowercase_ ( _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : Tuple , _lowercase : Tuple=None , _lowercase : Any=None , _lowercase : str=None , _lowercase : Optional[Any]=None , _lowercase : Dict=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase : List[str] = tf.cast(tf.math.not_equal(_lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class snake_case__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
SCREAMING_SNAKE_CASE__ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ = (
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase : Dict = TFBlenderbotSmallModelTester(self )
UpperCAmelCase : Dict = ConfigTester(self , config_class=lowercase )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
@require_tokenizers
@require_tf
class snake_case__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = [
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
SCREAMING_SNAKE_CASE__ = '''facebook/blenderbot_small-90M'''
@cached_property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase : List[Any] = self.tokenizer(self.src_text , return_tensors="tf" )
UpperCAmelCase : List[Any] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , )
UpperCAmelCase : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 292 | 1 |
"""simple docstring"""
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 299 |
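# A property check for the odd_even_transposition sort defined above: on random
# input it must agree with Python's built-in sorted().
import random

_data = [random.randint(0, 99) for _ in range(25)]
assert odd_even_transposition(list(_data)) == sorted(_data)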
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
_A = TypeVar("""T""")
_A = TypeVar("""U""")
class _lowerCamelCase ( Generic[T, U] ):
def __init__( self : List[Any] , UpperCamelCase : T | None , UpperCamelCase : U | None ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = key
lowerCAmelCase__ : Union[str, Any] = val
lowerCAmelCase__ : DoubleLinkedListNode[T, U] | None = None
lowerCAmelCase__ : DoubleLinkedListNode[T, U] | None = None
def __repr__( self : List[str] ) -> str:
"""simple docstring"""
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class _lowerCamelCase ( Generic[T, U] ):
def __init__( self : int ) -> None:
"""simple docstring"""
lowerCAmelCase__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.rear, self.head
def __repr__( self : str ) -> str:
"""simple docstring"""
lowerCAmelCase__ : str = ["""DoubleLinkedList"""]
lowerCAmelCase__ : int = self.head
while node.next is not None:
rep.append(str(UpperCamelCase ) )
lowerCAmelCase__ : Optional[Any] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : DoubleLinkedListNode[T, U] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
lowerCAmelCase__ : List[str] = node
lowerCAmelCase__ : int = previous
lowerCAmelCase__ : Optional[int] = node
lowerCAmelCase__ : Dict = self.rear
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
"""simple docstring"""
if node.prev is None or node.next is None:
return None
lowerCAmelCase__ : int = node.next
lowerCAmelCase__ : str = node.prev
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : List[Any] = None
return node
class _lowerCamelCase ( Generic[T, U] ):
_lowerCamelCase :dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self : Dict , UpperCamelCase : int ) -> str:
"""simple docstring"""
lowerCAmelCase__ : DoubleLinkedList[T, U] = DoubleLinkedList()
lowerCAmelCase__ : Tuple = capacity
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : Optional[Any] ) -> str:
"""simple docstring"""
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self : Optional[Any] , UpperCamelCase : T ) -> bool:
"""simple docstring"""
return key in self.cache
def _lowerCAmelCase ( self : Any , UpperCamelCase : T ) -> U | None:
"""simple docstring"""
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
lowerCAmelCase__ : DoubleLinkedListNode[T, U] = self.cache[key]
lowerCAmelCase__ : Any = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(UpperCamelCase )
return node.val
self.miss += 1
return None
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : T , UpperCamelCase : U ) -> None:
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
lowerCAmelCase__ : List[Any] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(UpperCamelCase ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
lowerCAmelCase__ : Union[str, Any] = DoubleLinkedListNode(UpperCamelCase , UpperCamelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
lowerCAmelCase__ : Dict = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
lowerCAmelCase__ : Any = value
self.list.add(UpperCamelCase )
@classmethod
def _lowerCAmelCase ( cls : Any , UpperCamelCase : int = 1_28 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
"""simple docstring"""
def cache_decorator_inner(UpperCamelCase : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*UpperCamelCase : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
lowerCAmelCase__ : List[Any] = LRUCache(UpperCamelCase )
lowerCAmelCase__ : Dict = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
lowerCAmelCase__ : str = func(*UpperCamelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , UpperCamelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(UpperCamelCase , """cache_info""" , UpperCamelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299 | 1 |
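# The doubly-linked-list LRU cache above is hard to follow in this form; a minimal
# reference sketch of the same eviction policy built on collections.OrderedDict
# (a deliberate simplification, not the snippet's implementation):
from collections import OrderedDict

class SimpleLRUCache:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self._store = OrderedDict()

    def get(self, key):
        if key not in self._store:
            return None
        self._store.move_to_end(key)  # mark as most recently used
        return self._store[key]

    def put(self, key, value) -> None:
        if key in self._store:
            self._store.move_to_end(key)
        self._store[key] = value
        if len(self._store) > self.capacity:
            self._store.popitem(last=False)  # evict the least recently used key

cache = SimpleLRUCache(2)
cache.put("a", 1)
cache.put("b", 2)
cache.get("a")
cache.put("c", 3)      # evicts "b"
print(cache.get("b"))  # None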
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
__magic_name__: Dict = (DDPMScheduler,)
def UpperCAmelCase_ ( self : Optional[Any] , **_A : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
        config = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_A )
return config
def UpperCAmelCase_ ( self : Dict ) -> Any:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def UpperCAmelCase_ ( self : str ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_A )
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=_A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_A )
def UpperCAmelCase_ ( self : str ) -> Any:
"""simple docstring"""
snake_case_ : Optional[int] = self.scheduler_classes[0]
snake_case_ : Tuple = self.get_scheduler_config()
snake_case_ : Dict = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5
def UpperCAmelCase_ ( self : Any ) -> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = self.scheduler_classes[0]
snake_case_ : List[Any] = self.get_scheduler_config()
snake_case_ : Any = scheduler_class(**_A )
snake_case_ : int = len(_A )
snake_case_ : Dict = self.dummy_model()
snake_case_ : Optional[Any] = self.dummy_sample_deter
snake_case_ : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
snake_case_ : Optional[int] = model(_A , _A )
# 2. predict previous mean of sample x_t-1
snake_case_ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
snake_case_ : List[str] = pred_prev_sample
snake_case_ : int = torch.sum(torch.abs(_A ) )
snake_case_ : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
"""simple docstring"""
snake_case_ : str = self.scheduler_classes[0]
snake_case_ : int = self.get_scheduler_config(prediction_type='v_prediction' )
snake_case_ : Tuple = scheduler_class(**_A )
snake_case_ : Dict = len(_A )
snake_case_ : Dict = self.dummy_model()
snake_case_ : Dict = self.dummy_sample_deter
snake_case_ : List[str] = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
snake_case_ : List[str] = model(_A , _A )
# 2. predict previous mean of sample x_t-1
snake_case_ : List[Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
snake_case_ : Optional[Any] = pred_prev_sample
snake_case_ : Any = torch.sum(torch.abs(_A ) )
snake_case_ : List[Any] = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def UpperCAmelCase_ ( self : List[str] ) -> int:
"""simple docstring"""
snake_case_ : str = self.scheduler_classes[0]
snake_case_ : Union[str, Any] = self.get_scheduler_config()
snake_case_ : Any = scheduler_class(**_A )
snake_case_ : str = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_A )
snake_case_ : Tuple = scheduler.timesteps
for i, timestep in enumerate(_A ):
if i == len(_A ) - 1:
snake_case_ : Tuple = -1
else:
snake_case_ : Union[str, Any] = timesteps[i + 1]
snake_case_ : str = scheduler.previous_timestep(_A )
snake_case_ : Optional[int] = prev_t.item()
self.assertEqual(_A , _A )
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
snake_case_ : List[Any] = self.scheduler_classes[0]
snake_case_ : Union[str, Any] = self.get_scheduler_config()
snake_case_ : List[str] = scheduler_class(**_A )
snake_case_ : Tuple = [100, 87, 50, 51, 0]
with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_A )
def UpperCAmelCase_ ( self : Dict ) -> int:
"""simple docstring"""
snake_case_ : Union[str, Any] = self.scheduler_classes[0]
snake_case_ : int = self.get_scheduler_config()
snake_case_ : List[str] = scheduler_class(**_A )
snake_case_ : int = [100, 87, 50, 1, 0]
snake_case_ : Any = len(_A )
with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
snake_case_ : str = self.scheduler_classes[0]
snake_case_ : List[str] = self.get_scheduler_config()
snake_case_ : Optional[Any] = scheduler_class(**_A )
snake_case_ : Union[str, Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _A , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_A )
| 534 |
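# The variance values asserted above follow the DDPM "fixed_small" posterior
# variance beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t. A
# numpy sketch using the same linear beta schedule as the config above:
import numpy as np

betas = np.linspace(0.0001, 0.02, 1000)
alphas_cumprod = np.cumprod(1.0 - betas)

def get_variance(t: int) -> float:
    alpha_prod_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    return (1 - alpha_prod_prev) / (1 - alphas_cumprod[t]) * betas[t]

print(round(get_variance(0), 5), round(get_variance(487), 5), round(get_variance(999), 5))
# approximately 0.0, 0.00979, 0.02 -- matching the assertions in the test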
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 534 | 1 |
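# A quick check that the identity used above, tanh(x) = 2 / (1 + exp(-2x)) - 1,
# agrees with numpy's built-in implementation:
import numpy as np

x = np.linspace(-3, 3, 7)
assert np.allclose((2 / (1 + np.exp(-2 * x))) - 1, np.tanh(x))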
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]

def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]

def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area

def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row[:]  # copy so the next pass sees this row's values
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 10 |
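# A compact, self-contained restatement of the bottom-up recurrence above
# (dp[r][c] = 1 + min(right, diagonal, down) when mat[r][c] == 1), traced on a
# 3x3 example:
def largest_square_side(mat: list) -> int:
    rows, cols = len(mat), len(mat[0])
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    best = 0
    for r in range(rows - 1, -1, -1):
        for c in range(cols - 1, -1, -1):
            if mat[r][c] == 1:
                dp[r][c] = 1 + min(dp[r][c + 1], dp[r + 1][c + 1], dp[r + 1][c])
                best = max(best, dp[r][c])
    return best

print(largest_square_side([[1, 1, 0], [1, 1, 1], [0, 1, 1]]))  # 2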
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
lowerCamelCase__ = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
lowerCamelCase__ = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
lowerCamelCase__ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
lowerCamelCase__ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
lowerCamelCase__ = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
lowerCamelCase__ = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
lowerCamelCase__ = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
lowerCamelCase__ = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
lowerCamelCase__ = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
lowerCamelCase__ = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
lowerCamelCase__ = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
lowerCamelCase__ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
lowerCamelCase__ = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
lowerCamelCase__ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
lowerCamelCase__ = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
lowerCamelCase__ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
lowerCamelCase__ = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
lowerCamelCase__ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
lowerCamelCase__ = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
lowerCamelCase__ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
lowerCamelCase__ = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
lowerCamelCase__ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
lowerCamelCase__ = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
lowerCamelCase__ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
lowerCamelCase__ = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
lowerCamelCase__ = ""
lowerCamelCase__ = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
lowerCamelCase__ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
lowerCamelCase__ = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 574 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For each vector in value_array, returns the nearest vector from dataset and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # keep the closest vector seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
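# Illustrative usage (values chosen for this sketch, not taken from the original file):
#   dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
#   value_array = np.array([[0, 0, 0]])
#   similarity_search(dataset, value_array)                  # -> [[[0, 0, 0], 0.0]]
#   cosine_similarity(np.array([1, 2]), np.array([6, 32]))   # -> ~0.96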
| 711 |
""" Testing suite for the PyTorch Autoformer model. """
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def lowerCamelCase_ ( __lowerCAmelCase="train-batch.pt" ) -> Tuple:
'''simple docstring'''
lowerCamelCase__ =hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__lowerCAmelCase , repo_type="dataset" )
lowerCamelCase__ =torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )
return batch
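# prepare_batch downloads a pickled batch from the `hf-internal-testing/tourism-monthly-batch`
# dataset repo; the returned dict provides the tensors used by the integration tests below
# (past_values, past_time_features, past_observed_mask, static_categorical_features,
# future_values, future_time_features).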
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 132 | 0 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """
    Cleans a section of the table of contents by removing duplicates and sorting entries alphabetically,
    keeping any "Overview" entry first.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_lowerCamelCase = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite) | 6 |
"""simple docstring"""
class lowerCamelCase__ :
def __init__( self ,A ):
UpperCAmelCase = n
UpperCAmelCase = [None] * self.n
UpperCAmelCase = 0 # index of the first element
UpperCAmelCase = 0
UpperCAmelCase = 0
def __len__( self ):
return self.size
def _UpperCamelCase ( self ):
return self.size == 0
def _UpperCamelCase ( self ):
return False if self.is_empty() else self.array[self.front]
def _UpperCamelCase ( self ,A ):
if self.size >= self.n:
raise Exception("""QUEUE IS FULL""" )
UpperCAmelCase = data
UpperCAmelCase = (self.rear + 1) % self.n
self.size += 1
return self
def _UpperCamelCase ( self ):
if self.size == 0:
raise Exception("""UNDERFLOW""" )
UpperCAmelCase = self.array[self.front]
UpperCAmelCase = None
UpperCAmelCase = (self.front + 1) % self.n
self.size -= 1
return temp
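# Minimal usage sketch (values are illustrative):
#   cq = CircularQueue(3)
#   cq.enqueue(1).enqueue(2)   # enqueue returns self, so calls can be chained
#   len(cq)                    # 2
#   cq.dequeue()               # 1
#   cq.first()                 # 2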
| 341 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to("cuda")\n\n    >>> prompt = "A red cartoon frog, 4k"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to("cuda")\n\n    >>> init_image = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/frog.png"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save("red_frog.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
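# Worked example: downscale_height_and_width(768, 768, scale_factor=8) returns (96, 96),
# i.e. ceil-division of each dimension by scale_factor**2, then multiplication by scale_factor.
# prepare_image scales pixel values to [-1, 1] and returns a 1xCxHxW float tensor.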
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for image-to-image generation using Kandinsky 2.2.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 444 |
import os
from collections.abc import Iterator
def lowerCamelCase__ (_UpperCAmelCase = "."):
for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_UpperCAmelCase)[1] in (".py", ".ipynb"):
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase).lstrip('./')
def lowerCamelCase__ (_UpperCAmelCase):
return F'''{i * ' '}*''' if i else "\n##"
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = old_path.split(os.sep)
for i, new_part in enumerate(new_path.split(os.sep)):
if (i + 1 > len(_UpperCAmelCase) or old_parts[i] != new_part) and new_part:
print(F'''{md_prefix(_UpperCAmelCase)} {new_part.replace('_' , ' ').title()}''')
return new_path
def lowerCamelCase__ (_UpperCAmelCase = "."):
SCREAMING_SNAKE_CASE = ''
for filepath in sorted(good_file_paths(_UpperCAmelCase)):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = os.path.split(_UpperCAmelCase)
if filepath != old_path:
SCREAMING_SNAKE_CASE = print_path(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = (filepath.count(os.sep) + 1) if filepath else 0
SCREAMING_SNAKE_CASE = F'''{filepath}/{filename}'''.replace(' ' , '%20')
SCREAMING_SNAKE_CASE = os.path.splitext(filename.replace('_' , ' ').title())[0]
print(F'''{md_prefix(_UpperCAmelCase)} [{filename}]({url})''')
if __name__ == "__main__":
print_directory_md('.')
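# Illustrative output for a tree containing `sorts/bubble_sort.py` (hypothetical file):
#
# ## Sorts
#   * [Bubble Sort](sorts/bubble_sort.py)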
| 444 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1_103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96_103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1_024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1_024)
        assert batch.attention_mask.shape == (2, 1_024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def UpperCamelCase_ ( self ):
# fmt: off
_lowercase = {"""input_ids""": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1_000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4_096)
        assert batch.attention_mask.shape == (2, 4_096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1],
        )
| 287 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak model's weights into our BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
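# Example invocation (script file name and output path are illustrative, not fixed by this file):
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py bart.large ./bart-large-converted
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py bart_xsum/model.pt ./bart-xsum \
#       --hf_config facebook/bart-large-xsum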
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 287 | 1 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
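# Note: constructing this class still works but emits the FutureWarning above;
# new code should instantiate DeformableDetrImageProcessor directly.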
| 447 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class _SCREAMING_SNAKE_CASE (UpperCamelCase , unittest.TestCase ):
lowerCAmelCase = MaMaaaTokenizer
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = True
def __snake_case ( self : Union[str, Any] )->Optional[Any]:
super().setUp()
__SCREAMING_SNAKE_CASE : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
__SCREAMING_SNAKE_CASE : List[str] = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
__SCREAMING_SNAKE_CASE : int = Path(self.tmpdirname )
save_json(UpperCamelCase , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCamelCase , save_dir / VOCAB_FILES_NAMES["spm_file"] )
__SCREAMING_SNAKE_CASE : Any = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : Optional[int] , **UpperCamelCase : Any )->Dict:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def __snake_case ( self : Dict , UpperCamelCase : List[str] )->int:
return (
"This is a test",
"This is a test",
)
def __snake_case ( self : str )->Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = "</s>"
__SCREAMING_SNAKE_CASE : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def __snake_case ( self : Tuple )->Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Any = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(UpperCamelCase ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self) -> None:
        pass
    def test_full_tokenizer(self) -> None:
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
    def test_tokenizer_integration(self) -> None:
# fmt: off
__SCREAMING_SNAKE_CASE : Tuple = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__SCREAMING_SNAKE_CASE,  # the expected-encoding literal defined above
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self) -> None:
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)
    def test_get_vocab(self) -> None:
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
    def test_tokenizer_batch_encode_plus(self) -> None:
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self) -> None:
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_special_tokens_unaffacted_by_save_load(self) -> None:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self) -> None:
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self) -> None:
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    @require_torch
    def test_tokenizer_target_mode(self) -> None:
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self) -> None:
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
| 447 | 1 |
lowercase_ = "Tobias Carryer"
from time import time
class SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , a : Tuple , a : Any , a : List[str] , a : int=int(time() ) )-> List[Any]: # noqa: B008
"""simple docstring"""
lowercase__ = multiplier
lowercase__ = increment
lowercase__ = modulo
lowercase__ = seed
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> str:
"""simple docstring"""
lowercase__ = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
lowercase_ = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
| 235 |
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None
    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 160 | 0 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowercase : Optional[int] = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self) -> None:
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
        self.run_and_check(train_args)

        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(entropy_eval_args)
| 423 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
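
# Illustrative sketch (added, not in the original module): with labels [0, 2]
# and num_classes=3, _dense_to_one_hot returns
#     [[1, 0, 0],
#      [0, 0, 1]]
# i.e. row i carries a single 1 at column labels_dense[i].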
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be either `uint8` to leave the input as `[0, 255]`, or
        `float32` to rescale into `[0, 1]`. The seed argument provides for
        convenient deterministic testing.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = "Validation size should be between 0 and " f"{len(train_images)}. Received: {validation_size}."
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
| 423 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Compute u * (u - 1) * ... * (u - p + 1) for Newton's forward-difference formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
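
# Illustrative note (added, not in the original): for p = 3,
# ucal(u, 3) == u * (u - 1) * (u - 2), the factor multiplying the third
# forward difference in Newton's formula; e.g. ucal(4, 3) == 4 * 3 * 2 == 24.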
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"""the value at {value} is {summ}""")
if __name__ == "__main__":
main()
| 251 | '''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 251 | 1 |
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()
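
# Example invocation sketch (added; the flag names follow
# TensorFlowBenchmarkArguments as exposed through HfArgumentParser and may
# differ across library versions):
#
#     python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128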
if __name__ == "__main__":
main()
| 702 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 598 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self) -> None:
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self) -> None:
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 649 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
    )
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
    '7B': 1_1_0_0_8,
    '13B': 1_3_8_2_4,
    '30B': 1_7_9_2_0,
    '65B': 2_2_0_1_6,
    '70B': 2_8_6_7_2,
}
NUM_SHARDS = {
    '7B': 1,
    '7Bf': 1,
    '13B': 2,
    '13Bf': 2,
    '30B': 4,
    '65B': 8,
    '70B': 8,
    '70Bf': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
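
# Illustrative check (added; not part of the original script): for the 7B model,
# n = 4096 gives int(8 * 4096 / 3) = 10922, and rounding up to a multiple of 256
# yields 43 * 256 = 11008, matching INTERMEDIATE_SIZE_MAP["7B"].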
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 649 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowercase : Optional[int] = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
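
# Illustrative sketch (added for clarity; the random inputs below are
# assumptions, not fixtures from this module): color_quantize maps every RGB
# pixel to the index of its nearest cluster centroid, e.g.
#
#     pixels = np.random.rand(4, 4, 3)        # a tiny 4x4 "image"
#     clusters = np.random.rand(16, 3)        # 16 color centroids
#     ids = color_quantize(pixels, clusters)  # shape (16,), values in [0, 16)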
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(
        self,
        image,
        size,
        resample=PILImageResampling.BILINEAR,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}')
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def normalize(
        self,
        image,
        data_format=None,
    ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 546 |
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])
def _lowerCAmelCase ( UpperCamelCase__: int = 1_00_00 ) -> int:
"""simple docstring"""
A = []
for num in range(1 , UpperCamelCase__ ):
A = 0
A = num
while iterations < 50:
A = sum_reverse(UpperCamelCase__ )
iterations += 1
if is_palindrome(UpperCamelCase__ ):
break
else:
lychrel_nums.append(UpperCamelCase__ )
return len(UpperCamelCase__ )
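# Quick sanity checks for the helpers above (196 is the classic Lychrel candidate):
assert is_palindrome(121) and not is_palindrome(123)
assert sum_reverse(349) == 349 + 943  # 349 becomes the palindrome 7337 in three steps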
if __name__ == "__main__":
print(f'''{solution() = }''')
| 546 | 1 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
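# Standalone sketch of the posterior variance the assertions above probe
# (assumption: the standard DDPM "fixed_small" variance with a linear beta schedule):
#
#   import torch
#   betas = torch.linspace(0.0001, 0.02, 1000)
#   alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
#   def posterior_variance(t: int) -> torch.Tensor:
#       prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
#       return betas[t] * (1.0 - prev) / (1.0 - alphas_cumprod[t])
#   posterior_variance(487)  # ~0.00979, matching the tolerance in test_variance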
| 49 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a `Mask2FormerConfig` from a pre-trained backbone configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
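# Round-trip sketch (commented out because the relative imports above only resolve
# inside the transformers source tree; the public import path works from anywhere):
#
#   from transformers import Mask2FormerConfig
#   config = Mask2FormerConfig(num_queries=50)
#   restored = Mask2FormerConfig.from_dict(config.to_dict())
#   assert restored.num_queries == 50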
| 234 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__UpperCamelCase : int = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 53 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
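# Background for the magic numbers above (an assumption from reading the scheduler
# source, not stated in this file): with "fixed_small_log", UnCLIP clamps the posterior
# variance at 1e-20, takes its log, and returns exp(0.5 * log v), so the t=0 assertion
# sees ~1e-10 rather than exactly zero.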
| 53 | 1 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
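# Usage sketch (hypothetical wiring; in the transformers repo the real entry point
# for this trainer is finetune_trainer.py in the same legacy seq2seq example folder):
#
#   trainer = Seq2SeqTrainer(model=model, args=training_args, data_args=data_args,
#                            train_dataset=train_dataset, eval_dataset=eval_dataset)
#   trainer.train()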
| 531 |
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the value of d up to `digit` for which 1/d has the longest recurring
    cycle in its decimal fraction part (Project Euler 26)."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
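# Example: among d <= 10, 1/7 = 0.(142857) has the longest recurring cycle, so:
assert solution(1, 10) == 7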
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 531 | 1 |
def solution() -> int:
    """Return the product a*b*c of the Pythagorean triplet with a + b + c = 1000
    (Project Euler 9)."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
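# The only such triplet is (a, b, c) = (200, 375, 425), since
# 200**2 + 375**2 == 425**2, so the product is 31_875_000.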
if __name__ == "__main__":
print(f"{solution() = }")
| 706 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a_ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
a_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a_ = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
a_ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def a__ ( _UpperCamelCase : Union[str, Any] ):
__lowerCamelCase = None
# source code of `config_class`
__lowerCamelCase = inspect.getsource(_UpperCamelCase )
__lowerCamelCase = _re_checkpoint.findall(_UpperCamelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
__lowerCamelCase = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
__lowerCamelCase = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
__lowerCamelCase = ckpt_name
break
return checkpoint
def a__ ( ):
__lowerCamelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
__lowerCamelCase = get_checkpoint_from_config_class(_UpperCamelCase )
__lowerCamelCase = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
__lowerCamelCase = '''\n'''.join(sorted(_UpperCamelCase ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
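# Illustrative behavior of the regex above (synthetic docstring fragment, not from the repo):
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]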
| 622 | 0 |
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Store an optional default key used when a method is not given one."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt `content` character by character; returns a list of chars."""
        assert isinstance(content, str) and isinstance(key, int)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        """Decrypt `content`; XOR is symmetric, so this mirrors `encrypt`."""
        assert isinstance(content, str) and isinstance(key, int)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(content, str) and isinstance(key, int)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(content, str) and isinstance(key, int)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))

        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))

        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 15 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
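# Usage sketch (commented out because it downloads the checkpoint):
#
#   tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   ids = tok("A long document to summarize.").input_ids
#   assert ids[-1] == tok.eos_token_id  # build_inputs_with_special_tokens appends </s>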
| 15 | 1 |
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_like(self):
        pass
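# Note: LayoutLM reuses BERT's WordPiece vocabulary format, which is why the toy
# vocab above tokenizes "UNwant\u00E9d,running" into ["un", "##want", "##ed", ",", "runn", "##ing"].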
| 151 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
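# The try/except pattern above means a missing optional dependency swaps in dummy
# placeholder objects instead of failing at import time, so e.g.
# `from diffusers.schedulers import DPMSolverSDEScheduler` always succeeds but only
# yields a usable class when both torch and torchsde are installed.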
| 151 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
snake_case : Optional[int] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
snake_case : str = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
snake_case : Union[str, Any] = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 16_000,
"return_attention_mask": False,
"do_normalize": True,
}
snake_case : Any = tempfile.mkdtemp()
snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case : List[Any] = os.path.join(self.tmpdirname , lowerCamelCase_ )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + "\n" )
# load decoder from hub
snake_case : Optional[Any] = "hf-internal-testing/ngram-beam-search-decoder"
def lowerCamelCase_ ( self , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : Dict = self.add_kwargs_tokens_map.copy()
kwargs.update(lowerCamelCase_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCamelCase_ ( self , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCamelCase_ ( self , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **lowerCamelCase_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = self.get_tokenizer()
snake_case : Optional[int] = self.get_feature_extractor()
snake_case : Any = self.get_decoder()
snake_case : Optional[int] = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , decoder=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowerCamelCase_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , lowerCamelCase_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
snake_case : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Dict = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(lowerCamelCase_ , "include" ):
WavaVecaProcessorWithLM(
tokenizer=lowerCamelCase_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Any = self.get_feature_extractor()
snake_case : List[Any] = self.get_tokenizer()
snake_case : Union[str, Any] = self.get_decoder()
snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , decoder=lowerCamelCase_ )
snake_case : Union[str, Any] = floats_list((3, 1_000) )
snake_case : Optional[Any] = feature_extractor(lowerCamelCase_ , return_tensors="np" )
snake_case : List[Any] = processor(lowerCamelCase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[Any] = self.get_feature_extractor()
snake_case : Optional[Any] = self.get_tokenizer()
snake_case : str = self.get_decoder()
snake_case : Any = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , decoder=lowerCamelCase_ )
snake_case : int = "This is a test string"
snake_case : Any = processor(text=lowerCamelCase_ )
snake_case : int = tokenizer(lowerCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE=(2, 10, 16) , SCREAMING_SNAKE_CASE=77 ):
"""simple docstring"""
np.random.seed(lowerCamelCase_ )
return np.random.rand(*lowerCamelCase_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : int = self.get_feature_extractor()
snake_case : str = self.get_tokenizer()
snake_case : Tuple = self.get_decoder()
snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , decoder=lowerCamelCase_ )
snake_case : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
snake_case : str = processor.decode(lowerCamelCase_ )
snake_case : Union[str, Any] = decoder.decode_beams(lowerCamelCase_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : Union[str, Any] = self.get_feature_extractor()
snake_case : Dict = self.get_tokenizer()
snake_case : Dict = self.get_decoder()
snake_case : Optional[int] = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , decoder=lowerCamelCase_ )
snake_case : str = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
snake_case : Union[str, Any] = processor.batch_decode(lowerCamelCase_ )
else:
with get_context(lowerCamelCase_ ).Pool() as pool:
snake_case : List[str] = processor.batch_decode(lowerCamelCase_ , lowerCamelCase_ )
snake_case : Dict = list(lowerCamelCase_ )
with get_context("fork" ).Pool() as p:
snake_case : int = decoder.decode_beams_batch(lowerCamelCase_ , lowerCamelCase_ )
        texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCamelCase_ , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(lowerCamelCase_ , decoded_processor.logit_score )
self.assertListEqual(lowerCamelCase_ , decoded_processor.lm_score )
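    # A minimal hedged sketch of the pattern exercised above (not a test; the
    # `processor` and `logits` arguments are illustrative): the pool has to be
    # created *after* the processor so that forked workers inherit the LM.
    def _example_pool_decode(self, processor, logits):
        from multiprocessing import get_context

        with get_context("fork").Pool() as pool:
            decoded = processor.batch_decode(logits, pool)
        return decoded.text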
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Union[str, Any] = self.get_feature_extractor()
snake_case : Any = self.get_tokenizer()
snake_case : Optional[int] = self.get_decoder()
snake_case : List[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , decoder=lowerCamelCase_ )
snake_case : Optional[int] = self._get_dummy_logits()
snake_case : Any = 15
snake_case : Union[str, Any] = -20.0
snake_case : Optional[Any] = -4.0
snake_case : Tuple = processor.batch_decode(
lowerCamelCase_ , beam_width=lowerCamelCase_ , beam_prune_logp=lowerCamelCase_ , token_min_logp=lowerCamelCase_ , )
snake_case : Any = decoded_processor_out.text
snake_case : Union[str, Any] = list(lowerCamelCase_ )
with get_context("fork" ).Pool() as pool:
snake_case : Tuple = decoder.decode_beams_batch(
lowerCamelCase_ , lowerCamelCase_ , beam_width=lowerCamelCase_ , beam_prune_logp=lowerCamelCase_ , token_min_logp=lowerCamelCase_ , )
snake_case : str = [d[0][0] for d in decoded_decoder_out]
snake_case : Optional[int] = [d[0][2] for d in decoded_decoder_out]
snake_case : Tuple = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , lowerCamelCase_ )
self.assertTrue(np.array_equal(lowerCamelCase_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , lowerCamelCase_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(lowerCamelCase_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , lowerCamelCase_ , atol=1E-3 ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Union[str, Any] = self.get_feature_extractor()
snake_case : Dict = self.get_tokenizer()
snake_case : List[str] = self.get_decoder()
snake_case : Any = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , decoder=lowerCamelCase_ )
snake_case : Any = self._get_dummy_logits()
snake_case : Union[str, Any] = 2.0
snake_case : int = 5.0
snake_case : List[str] = -20.0
snake_case : Union[str, Any] = True
snake_case : Optional[Any] = processor.batch_decode(
lowerCamelCase_ , alpha=lowerCamelCase_ , beta=lowerCamelCase_ , unk_score_offset=lowerCamelCase_ , lm_score_boundary=lowerCamelCase_ , )
snake_case : Union[str, Any] = decoded_processor_out.text
snake_case : List[str] = list(lowerCamelCase_ )
decoder.reset_params(
alpha=lowerCamelCase_ , beta=lowerCamelCase_ , unk_score_offset=lowerCamelCase_ , lm_score_boundary=lowerCamelCase_ , )
with get_context("fork" ).Pool() as pool:
snake_case : str = decoder.decode_beams_batch(
lowerCamelCase_ , lowerCamelCase_ , )
snake_case : List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , lowerCamelCase_ )
snake_case : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , lowerCamelCase_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
snake_case : int = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
snake_case : List[str] = os.listdir(lowerCamelCase_ )
snake_case : str = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = snapshot_download("hf-internal-testing/processor_with_lm" )
snake_case : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(lowerCamelCase_ )
snake_case : Dict = processor.decoder.model_container[processor.decoder._model_key]
snake_case : Union[str, Any] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
snake_case : int = os.listdir(lowerCamelCase_ )
snake_case : Optional[Any] = os.listdir(lowerCamelCase_ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case : Any = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case : str = floats_list((3, 1_000) )
snake_case : Any = processor_wavaveca(lowerCamelCase_ , return_tensors="np" )
snake_case : str = processor_auto(lowerCamelCase_ , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
snake_case : Dict = self._get_dummy_logits()
snake_case : str = processor_wavaveca.batch_decode(lowerCamelCase_ )
snake_case : Any = processor_auto.batch_decode(lowerCamelCase_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : int = self.get_feature_extractor()
snake_case : Union[str, Any] = self.get_tokenizer()
snake_case : str = self.get_decoder()
snake_case : Any = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , decoder=lowerCamelCase_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[str] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case : Dict = self._get_dummy_logits()[0]
snake_case : List[Any] = processor.decode(lowerCamelCase_ , output_word_offsets=lowerCamelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[str] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case : Union[str, Any] = self._get_dummy_logits()
snake_case : int = processor.batch_decode(lowerCamelCase_ , output_word_offsets=lowerCamelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(lowerCamelCase_ , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCamelCase_ ( self ):
"""simple docstring"""
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        expected_text = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), expected_text)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
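# A hedged, self-contained helper distilled from the slow test above; it only
# restates the offset-to-seconds arithmetic and assumes a CTC `model` and a
# `processor` that exposes a feature extractor.
def word_offsets_to_times(output, model, processor):
    time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
    return [
        {"word": d["word"], "start_time": d["start_offset"] * time_offset, "end_time": d["end_offset"] * time_offset}
        for d in output["word_offsets"]
    ]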
| 134 | import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        inputs = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            inputs = lyr(
                inputs,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        decoder_outputs = self.decoder_norm(inputs)
        decoder_outputs = self.post_dropout(decoder_outputs)

        spec_out = self.spec_out(decoder_outputs)
        return spec_out
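# Hedged illustration of the rescaling in `forward` above: noise times sampled
# in [0, 1) are multiplied by `max_decoder_noise_time` before the sinusoidal
# embedding is computed. The batch size and embedding dim below are arbitrary.
def _example_noise_time_embedding():
    noise_time = torch.rand(4)  # batch of noise times in [0, 1)
    emb = get_timestep_embedding(noise_time * 2000.0, embedding_dim=768, max_period=2000.0)
    assert emb.shape == (4, 768)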
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """Construct a layernorm module in the T5 style. No bias and no subtraction of mean."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer norm which only scales and doesn't shift, so the variance is
        # computed without the mean and there is no bias; the statistic is taken in fp32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
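# Hedged sketch: unlike nn.LayerNorm, the T5-style norm above only rescales by
# the root mean square (no mean subtraction, no bias), with the statistic
# accumulated in float32. Shapes below are arbitrary.
def _example_rms_norm():
    norm = TaLayerNorm(16)
    out = norm(torch.randn(2, 4, 16))
    assert out.shape == (2, 4, 16)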
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
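# Hedged check of the formula above: it is the tanh approximation of GELU, the
# same curve that recent PyTorch versions expose as F.gelu(x, approximate="tanh").
def _example_new_gelu():
    import torch.nn.functional as F

    x = torch.randn(8)
    assert torch.allclose(NewGELUActivation()(x), F.gelu(x, approximate="tanh"), atol=1e-6)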
class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
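# Hedged sketch of the FiLM layer above: a linear projection of the
# conditioning embedding yields per-channel (scale, shift) that modulate the
# hidden states. All dimensions below are illustrative.
def _example_film_conditioning():
    film = TaFiLMLayer(in_features=4 * 768, out_features=768)
    x = torch.randn(2, 10, 768)
    cond = torch.randn(2, 1, 4 * 768)
    assert film(x, cond).shape == (2, 10, 768)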
| 537 | 0 |
"""simple docstring"""
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with LoRA-like adapter - Used only for testing purposes."""

        def __init__(self, module, rank):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
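    # Hedged usage sketch for the adapter above: wrap a frozen projection so
    # that its output is summed with a small trainable low-rank path. The
    # layer sizes are illustrative.
    def _example_lora_wrap():
        frozen = nn.Linear(768, 768)
        frozen.weight.requires_grad_(False)
        adapted = LoRALayer(frozen, rank=16)
        out = adapted(torch.randn(1, 768))
        assert out.shape == (1, 768)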
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.floataa, device_map="auto"
        )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
    def tearDown(self):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> List[Any]:
        config = self.model_abit.config
        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()
def lowercase_ ( self ) -> Any:
from bitsandbytes.nn import Paramsabit
        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Paramsabit)
def lowercase_ ( self ) -> Union[str, Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
            if isinstance(module, torch.nn.Linear):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase_ ( self ) -> Optional[int]:
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def lowercase_ ( self ) -> Tuple:
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_abit = True

        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def lowercase_ ( self ) -> Any:
        with self.assertRaises(ValueError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname)
def lowercase_ ( self ) -> Any:
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_abit=True,
                device_map="auto",
                bnb_abit_quant_type="nf4",
            )
def lowercase_ ( self ) -> List[str]:
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_abit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_abit.to(torch.floataa)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with `float`
            self.model_abit.float()

        with self.assertRaises(ValueError):
            # Tries with `half`
            self.model_abit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fpaa = self.model_fpaa.to(torch.floataa)
        _ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fpaa.to("cpu")

        # Check this does not throw an error
        _ = self.model_fpaa.half()

        # Check this does not throw an error
        _ = self.model_fpaa.float()
def lowercase_ ( self ) -> Dict:
        model = AutoModelForSeqaSeqLM.from_pretrained("t5-small", load_in_abit=True, device_map="auto")
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
"""simple docstring"""
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"
    def tearDown(self):
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> Dict:
from transformers import TaForConditionalGeneration
        modules = TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules = None

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        TaForConditionalGeneration._keep_in_fpaa_modules = modules
def lowercase_ ( self ) -> List[Any]:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linearabit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
"""simple docstring"""
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_abit=True, device_map="auto"
        )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_abit=True, device_map="auto"
        )
    def tearDown(self):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> str:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest):
"""simple docstring"""
    def setUp(self):
super().setUp()
    def tearDown(self):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> List[Any]:
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
"""simple docstring"""
    def setUp(self):
super().setUp()
def lowercase_ ( self ) -> List[str]:
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_abit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
"""simple docstring"""
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()
def lowercase_ ( self ) -> str:
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
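    # A condensed hedged sketch of steps 1-2 above, kept as a non-test helper:
    # freeze the quantized base model and keep 1-D params (layer norms) in fp32
    # for numerical stability before attaching adapters.
    def _example_freeze_for_kbit_training(self, model):
        for param in model.parameters():
            param.requires_grad = False
            if param.ndim == 1:
                param.data = param.data.to(torch.float32)
        return model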
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 509 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 509 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
"do_resize": True,
"size": {"height": 224, "width": 224},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
"do_convert_rgb": True,
}
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def _lowerCamelCase ( self ):
__a : Optional[int] = self.get_tokenizer()
__a : Optional[Any] = self.get_rust_tokenizer()
__a : Any = self.get_image_processor()
__a : List[Any] = ChineseCLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
__a : Dict = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase )
__a : Optional[int] = ChineseCLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
__a : List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , _UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a : Dict = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
__a : List[Any] = self.get_image_processor(do_normalize=_UpperCAmelCase )
__a : Optional[int] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.get_image_processor()
__a : List[str] = self.get_tokenizer()
__a : Optional[Any] = ChineseCLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : Dict = self.prepare_image_inputs()
__a : Tuple = image_processor(_UpperCAmelCase , return_tensors='''np''' )
__a : Dict = processor(images=_UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCamelCase ( self ):
__a : List[str] = self.get_image_processor()
__a : Optional[Any] = self.get_tokenizer()
__a : Optional[int] = ChineseCLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : Tuple = "Alexandra,T-shirt的价格是15便士。"
__a : int = processor(text=_UpperCAmelCase )
__a : Dict = tokenizer(_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.get_image_processor()
__a : Tuple = self.get_tokenizer()
__a : Optional[int] = ChineseCLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : Union[str, Any] = "Alexandra,T-shirt的价格是15便士。"
__a : Optional[Any] = self.prepare_image_inputs()
__a : str = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.get_image_processor()
__a : int = self.get_tokenizer()
__a : str = ChineseCLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a : List[str] = processor.batch_decode(_UpperCAmelCase )
__a : List[str] = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.get_image_processor()
__a : Optional[Any] = self.get_tokenizer()
__a : Optional[Any] = ChineseCLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : Optional[Any] = "Alexandra,T-shirt的价格是15便士。"
__a : Any = self.prepare_image_inputs()
__a : Union[str, Any] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : Any = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
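# Hedged usage sketch: the config instantiates with the defaults above; only
# the two fields checked here are asserted.
def _example_config():
    config = VivitConfig()
    assert config.model_type == "vivit"
    assert config.num_frames == 32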
| 595 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO) , """Tatoeba directory does not exist.""" )
class A ( unittest.TestCase ):
@cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
self.resolver.convert_models(["heb-eng"] )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
        mname, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
assert mmeta["long_pair"] == "heb-eng"
| 691 |
'''simple docstring'''
__snake_case : Dict = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 691 | 1 |
"""simple docstring"""
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the value of d < `digit` for which 1/d contains the longest recurring
    cycle in its decimal fraction part.

    >>> solution(1, 10)
    7
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    r"""
    Constructs a CLIPSeg processor which wraps a ViT image processor and a CLIP tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """This method forwards all its arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """This method forwards all its arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
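# Hedged usage sketch (not part of the class): build the processor from
# in-memory components and encode a text/image pair. The tokenizer checkpoint
# is the standard public CLIP one; the image is synthetic.
def _example_usage():
    import numpy as np
    from PIL import Image
    from transformers import CLIPTokenizer, ViTImageProcessor

    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    image_processor = ViTImageProcessor()
    processor = CLIPSegProcessor(image_processor=image_processor, tokenizer=tokenizer)
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    return processor(text="a cat", images=image, return_tensors="pt")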
| 7 | 1 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class UpperCamelCase__( unittest.TestCase ):
def a__( self : int )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : List[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : List[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCAmelCase = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Optional[int] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
            # Removed: 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
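# Hedged sketch of what the helper verifies: every `.bin` weight needs a
# `.safetensors` sibling (per variant) for a repo to count as compatible.
def _example_compat_check():
    filenames = [
        "unet/diffusion_pytorch_model.bin",
        "unet/diffusion_pytorch_model.safetensors",
    ]
    assert is_safetensors_compatible(filenames)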
| 50 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"\1version=\"VERSION\","),
    "doc": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file( fname , version , pattern ):
    '''simple docstring'''
    with open(fname , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.write(code )
def update_version_in_examples( version ):
    '''simple docstring'''
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='''examples''' )
def global_version_update( version , patch=False ):
    '''simple docstring'''
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list( ):
    '''simple docstring'''
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
        index += 1
    with open(README_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
def get_version( ):
    '''simple docstring'''
    with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work( patch=False ):
    '''simple docstring'''
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = f"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    version = input(f"""Which version are you releasing? [{default_version}]""" )
    if len(version ) == 0:
        version = default_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version , patch=patch )
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
        clean_main_ref_in_model_list()
def post_release_work( ):
    '''simple docstring'''
    current_version = get_version()
    dev_version = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"""Which version are we developing now? [{dev_version}]""" )
    if len(version ) == 0:
        version = dev_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version )
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
    clean_main_ref_in_model_list()
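# A hedged, self-contained illustration of the REPLACE_PATTERNS machinery above
# (not part of the release flow; call it manually if curious): the template's
# literal "VERSION" token is filled in before `re.sub` runs.
def _demo_replace_pattern():
    re_pattern , replace = REPLACE_PATTERNS['''init''']
    replace = replace.replace('''VERSION''' , '''4.27.0''' )
    # rewrites a dev version line to a plain release version
    return re_pattern.sub(replace , '''__version__ = "4.27.1.dev0"\n''' )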
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowercase : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 50 | 1 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester :
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self):
        return ViTMSNConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification( self , config , pixel_values , labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}')
        print(f'Labels: {labels}')
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ViTMSNConfig , has_text_modality=False , hidden_size=3_7)
    def test_config( self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='ViTMSN does not use inputs_embeds')
    def test_inputs_embeds( self):
        pass
    def test_model_common_attributes( self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
    def test_forward_signature( self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img( ):
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self):
        return ViTImageProcessor.from_pretrained('facebook/vit-msn-small') if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
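def _inference_demo():
    # Hedged stand-alone sketch mirroring the slow test above (needs network
    # access to the `facebook/vit-msn-small` checkpoint); not run by the suite.
    processor = ViTImageProcessor.from_pretrained('facebook/vit-msn-small')
    model = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small')
    inputs = processor(images=prepare_img() , return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits
    # argmax over the 1000 ImageNet-1k classes
    return logits.argmax(-1).item()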
| 313 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever( RagRetriever ):
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None):
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None
    def init_retrieval( self , distributed_port):
        logger.info('initializing retrieval')
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('dist initialized')
            # needs to be set manually
            os.environ['GLOO_SOCKET_IFNAME'] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ['MASTER_PORT'] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None , backend='gloo')
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('dist not initialized / main')
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main( self):
        return dist.get_rank(group=self.process_group) == 0
    def _scattered( self , scatter_list , target_shape , target_type=torch.float32):
        target_tensor = torch.empty(target_shape , dtype=target_type)
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group)
        return target_tensor
    def _infer_socket_ifname( self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('e')) , None)
        return ifname
    def retrieve( self , question_hidden_states , n_docs) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states) , dst=0 , gather_list=gather_list , group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids , vectors = self._main_retrieve(torch.cat(gather_list).numpy() , n_docs)
            ids , vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids , n_queries)
            scatter_vectors = self._chunk_tensor(vectors , n_queries)
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
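# `_main_retrieve` and `_chunk_tensor` come from the `RagRetriever` base class
# and are not shown in this snippet. A hedged sketch of what `_chunk_tensor`
# has to do (split the gathered batch back into one `chunk_size`-row slice per
# worker, assuming every worker contributes the same batch size):
def _chunk_tensor_sketch(tensor , chunk_size ):
    return [tensor[i : i + chunk_size] for i in range(0 , tensor.shape[0] , chunk_size )]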
| 313 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ :Dict = logging.get_logger(__name__)
lowercase__ :Optional[int] = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Optional[int] ='''wavlm'''
def __init__( self ,A__=3_2 ,A__=7_6_8 ,A__=1_2 ,A__=1_2 ,A__=3_0_7_2 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=0.1 ,A__=0.0 ,A__=0.1 ,A__=0.1 ,A__=0.02 ,A__=1E-5 ,A__="group" ,A__="gelu" ,A__=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) ,A__=(5, 2, 2, 2, 2, 2, 2) ,A__=(1_0, 3, 3, 3, 3, 2, 2) ,A__=False ,A__=1_2_8 ,A__=1_6 ,A__=3_2_0 ,A__=8_0_0 ,A__=False ,A__=True ,A__=0.05 ,A__=1_0 ,A__=2 ,A__=0.0 ,A__=1_0 ,A__=3_2_0 ,A__=2 ,A__=0.1 ,A__=1_0_0 ,A__=2_5_6 ,A__=2_5_6 ,A__=0.1 ,A__="mean" ,A__=False ,A__=False ,A__=2_5_6 ,A__=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) ,A__=(5, 3, 3, 1, 1) ,A__=(1, 2, 3, 1, 1) ,A__=5_1_2 ,A__=8_0 ,A__=0 ,A__=1 ,A__=2 ,A__=False ,A__=3 ,A__=2 ,A__=3 ,A__=None ,**A__ ,):
super().__init__(**A__ ,pad_token_id=A__ ,bos_token_id=A__ ,eos_token_id=A__)
lowercase = hidden_size
lowercase = feat_extract_norm
lowercase = feat_extract_activation
lowercase = list(A__)
lowercase = list(A__)
lowercase = list(A__)
lowercase = conv_bias
lowercase = num_buckets
lowercase = max_bucket_distance
lowercase = num_conv_pos_embeddings
lowercase = num_conv_pos_embedding_groups
lowercase = len(self.conv_dim)
lowercase = num_hidden_layers
lowercase = intermediate_size
lowercase = hidden_act
lowercase = num_attention_heads
lowercase = hidden_dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = feat_proj_dropout
lowercase = final_dropout
lowercase = layerdrop
lowercase = layer_norm_eps
lowercase = initializer_range
lowercase = num_ctc_classes
lowercase = vocab_size
lowercase = do_stable_layer_norm
lowercase = use_weighted_layer_sum
lowercase = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase = apply_spec_augment
lowercase = mask_time_prob
lowercase = mask_time_length
lowercase = mask_time_min_masks
lowercase = mask_feature_prob
lowercase = mask_feature_length
# parameters for pretraining with codevector quantized representations
lowercase = num_codevectors_per_group
lowercase = num_codevector_groups
lowercase = contrastive_logits_temperature
lowercase = num_negatives
lowercase = codevector_dim
lowercase = proj_codevector_dim
lowercase = diversity_loss_weight
# ctc loss
lowercase = ctc_loss_reduction
lowercase = ctc_zero_infinity
# adapter
lowercase = add_adapter
lowercase = adapter_kernel_size
lowercase = adapter_stride
lowercase = num_adapter_layers
lowercase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase = list(A__)
lowercase = list(A__)
lowercase = list(A__)
lowercase = xvector_output_dim
@property
def A__ ( self):
return functools.reduce(operator.mul ,self.conv_stride ,1)
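# A quick note on the unnamed property above: it reduces `conv_stride` with
# multiplication, i.e. the number of raw waveform samples per encoder frame
# (upstream this property is `inputs_to_logits_ratio`). With the defaults:
#
#     functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1)  # == 320
#     # 320 samples == 20 ms of audio at a 16 kHz sampling rate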
| 633 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env( key , default=False ):
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.' )
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skip('''Test was skipped''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCAmelCase__ )
def require_torch_min_version( test_case=None , version=None ):
    '''simple docstring'''
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , f'test requires torch version >= {version}' )(test_case )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCAmelCase__ )
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCAmelCase__ )
class lowercase ( unittest.TestCase ):
lowercase_ : int =True
@classmethod
def A__ ( cls):
        cls.tmpdir = tempfile.mkdtemp()
@classmethod
def A__ ( cls):
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def A__ ( self):
if self.clear_on_setup:
for path in Path(self.tmpdir).glob('''**/*'''):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(A__)
class lowercase ( unittest.TestCase ):
def A__ ( self):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowercase ( unittest.TestCase ):
    def setUp( self , mocks=None):
        self.mocks = mocks if isinstance(mocks ,(tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def are_the_same_tensors( tensor ):
    '''simple docstring'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput :
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback ):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    '''simple docstring'''
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}' )
    return result
class SubprocessCallException(Exception ):
    pass
def run_command( command , return_stdout=False ):
    '''simple docstring'''
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 633 | 1 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase_ ( A__ ):
"""simple docstring"""
def __init__( self : Dict, UpperCamelCase__ : WhisperForConditionalGeneration, UpperCamelCase__ : WhisperProcessor, UpperCamelCase__ : AutoencoderKL, UpperCamelCase__ : CLIPTextModel, UpperCamelCase__ : CLIPTokenizer, UpperCamelCase__ : UNetaDConditionModel, UpperCamelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], UpperCamelCase__ : StableDiffusionSafetyChecker, UpperCamelCase__ : CLIPImageProcessor, ) -> Dict:
super().__init__()
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
speech_model=_lowerCamelCase, speech_processor=_lowerCamelCase, vae=_lowerCamelCase, text_encoder=_lowerCamelCase, tokenizer=_lowerCamelCase, unet=_lowerCamelCase, scheduler=_lowerCamelCase, feature_extractor=_lowerCamelCase, )
def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ : Optional[Union[str, int]] = "auto" ) -> int:
if slice_size == "auto":
_A = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
        self.enable_attention_slicing(None )
@torch.no_grad()
def __call__( self : Any, UpperCamelCase__ : Optional[int], UpperCamelCase__ : Dict=1_60_00, UpperCamelCase__ : int = 5_12, UpperCamelCase__ : int = 5_12, UpperCamelCase__ : int = 50, UpperCamelCase__ : float = 7.5, UpperCamelCase__ : Optional[Union[str, List[str]]] = None, UpperCamelCase__ : Optional[int] = 1, UpperCamelCase__ : float = 0.0, UpperCamelCase__ : Optional[torch.Generator] = None, UpperCamelCase__ : Optional[torch.FloatTensor] = None, UpperCamelCase__ : Optional[str] = "pil", UpperCamelCase__ : bool = True, UpperCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCamelCase__ : int = 1, **UpperCamelCase__ : int, ) -> Dict:
_A = self.speech_processor.feature_extractor(
_lowerCamelCase, return_tensors='pt', sampling_rate=_lowerCamelCase ).input_features.to(self.device )
_A = self.speech_model.generate(_lowerCamelCase, max_length=48_00_00 )
_A = self.speech_processor.tokenizer.batch_decode(_lowerCamelCase, skip_special_tokens=_lowerCamelCase, normalize=_lowerCamelCase )[
0
]
if isinstance(_lowerCamelCase, _lowerCamelCase ):
_A = 1
elif isinstance(_lowerCamelCase, _lowerCamelCase ):
_A = len(_lowerCamelCase )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(_lowerCamelCase )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowerCamelCase, _lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(_lowerCamelCase )}.' )
# get prompt text embeddings
_A = self.tokenizer(
_lowerCamelCase, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt', )
_A = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
_A = text_input_ids[:, : self.tokenizer.model_max_length]
_A = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_A = text_embeddings.shape
_A = text_embeddings.repeat(1, _lowerCamelCase, 1 )
_A = text_embeddings.view(bs_embed * num_images_per_prompt, _lowerCamelCase, -1 )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_A = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_A = 42
if negative_prompt is None:
_A = [''] * batch_size
elif type(_lowerCamelCase ) is not type(_lowerCamelCase ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(_lowerCamelCase )} !='
f' {type(_lowerCamelCase )}.' )
elif isinstance(_lowerCamelCase, _lowerCamelCase ):
_A = [negative_prompt]
elif batch_size != len(_lowerCamelCase ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(_lowerCamelCase )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
' the batch size of `prompt`.' )
else:
_A = negative_prompt
_A = text_input_ids.shape[-1]
_A = self.tokenizer(
_lowerCamelCase, padding='max_length', max_length=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='pt', )
_A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_A = uncond_embeddings.shape[1]
_A = uncond_embeddings.repeat(1, _lowerCamelCase, 1 )
_A = uncond_embeddings.view(batch_size * num_images_per_prompt, _lowerCamelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_A = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_A = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_A = torch.randn(_lowerCamelCase, generator=_lowerCamelCase, device='cpu', dtype=_lowerCamelCase ).to(
self.device )
else:
_A = torch.randn(_lowerCamelCase, generator=_lowerCamelCase, device=self.device, dtype=_lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
_A = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_A = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_A = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_A = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_A = {}
if accepts_eta:
_A = eta
for i, t in enumerate(self.progress_bar(_lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
_A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A = self.scheduler.scale_model_input(_lowerCamelCase, _lowerCamelCase )
# predict the noise residual
_A = self.unet(_lowerCamelCase, _lowerCamelCase, encoder_hidden_states=_lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
_A = noise_pred.chunk(2 )
_A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_A = self.scheduler.step(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, **_lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
_A = 1 / 0.18_215 * latents
_A = self.vae.decode(_lowerCamelCase ).sample
_A = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_A = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
_A = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_lowerCamelCase, nsfw_content_detected=_lowerCamelCase )
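# A hedged end-to-end usage sketch for the pipeline above (the checkpoints are
# illustrative assumptions; upstream this community pipeline is loaded through
# `custom_pipeline="speech_to_image_diffusion"`):
#
#     import torch
#     from datasets import load_dataset
#     from transformers import WhisperForConditionalGeneration, WhisperProcessor
#     from diffusers import DiffusionPipeline
#     audio = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")[0]["audio"]
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4",
#         custom_pipeline="speech_to_image_diffusion",
#         speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#         speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#         torch_dtype=torch.float16,
#     ).to("cuda")
#     image = pipe(audio["array"], sampling_rate=audio["sampling_rate"]).images[0]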
| 107 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class FocalNetConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = '''focalnet'''
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , use_conv_embed=False , hidden_sizes=[192, 384, 768, 768] , depths=[2, 2, 6, 2] , focal_levels=[2, 2, 2, 2] , focal_windows=[3, 3, 3, 3] , hidden_act="gelu" , mlp_ratio=4.0 , hidden_dropout_prob=0.0 , drop_path_rate=0.1 , use_layerscale=False , layerscale_value=1E-4 , use_post_layernorm=False , use_post_layernorm_in_modulation=False , normalize_modulator=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
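# Hedged instantiation sketch: with the default depths the stage names are
# ["stem", "stage1", ..., "stage4"], and whichever of `out_features` /
# `out_indices` was left as None gets resolved against that list, e.g.
#
#     cfg = FocalNetConfig(out_features=["stage2", "stage4"])
#     cfg.out_indices  # -> [2, 4]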
| 357 | 0 |
import math
def malus_law( initial_intensity , angle ):
"""simple docstring"""
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
# handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
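    # Hedged usage example: a polarizer at 60 degrees passes cos^2(60°) = 1/4
    # of the incident intensity.
    print(malus_law(100.0 , 60 ) ) # ~25.0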
| 429 |
def binary_exponentiation( a , n , mod ):
    """simple docstring"""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a , n - 1 , mod ) * a) % mod
    else:
        b = binary_exponentiation(a , n // 2 , mod )
        return (b * b) % mod
# a prime number
p = 701
a = 10_0000_0000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
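# Why `b ** (p - 2)` acts as division above: p is prime, so by Fermat's little
# theorem b**(p - 1) ≡ 1 (mod p), making b**(p - 2) the modular inverse of b.
# Quick self-check:
assert (b * binary_exponentiation(b, p - 2, p)) % p == 1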
| 429 | 1 |
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig( PretrainedConfig ):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = """nezha"""
    def __init__( self , vocab_size=2_11_28 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
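# Hedged usage note: NEZHA's distinguishing knob above is `max_relative_position`;
# the model uses functional relative position encodings bounded by that value
# rather than learned absolute ones. A minimal sketch:
#
#     config = NezhaConfig(max_relative_position=64)
#     # config.max_relative_position -> 64, consumed by the relative-attention layers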
| 416 |
def print_pascal_triangle(num_rows ) -> None:
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=''' ''' )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=''' ''' )
            else:
                print(triangle[row_idx][col_idx], end='''''' )
        print()
def generate_pascal_triangle(num_rows ) -> list[list[int]]:
    if not isinstance(num_rows, int ):
        raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            '''The input value of \'num_rows\' should be greater than or equal to 0''' )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle, current_row_idx )
        triangle.append(current_row )
    return triangle
def populate_current_row(triangle, current_row_idx ) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx ):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx )
    return current_row
def calculate_current_element(triangle, current_row, current_row_idx, current_col_idx, ) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows ) -> list[list[int]]:
    if not isinstance(num_rows, int ):
        raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            '''The input value of \'num_rows\' should be greater than or equal to 0''' )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int ) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup='''import __main__''' )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'{call:38} -- {timing:.4f} seconds' )
    for value in range(1_5 ): # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value )
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
| 416 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a__ :
@staticmethod
def UpperCAmelCase( *lowerCamelCase_ : List[str] , **lowerCamelCase_ : List[str] ):
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class a__ ( unittest.TestCase ):
lowerCamelCase__: Tuple = MODEL_FOR_OBJECT_DETECTION_MAPPING
def UpperCAmelCase( self : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str ):
a_ : List[str] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def UpperCAmelCase( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] ):
a_ : Optional[Any] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
import datasets
a_ : Any = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
a_ : int = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
a_ : Dict = object_detector(__UpperCamelCase , threshold=0.0 )
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def UpperCAmelCase( self : Optional[Any] ):
pass
@require_torch
def UpperCAmelCase( self : Tuple ):
a_ : Dict = """hf-internal-testing/tiny-detr-mobilenetsv3"""
a_ : Optional[int] = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
a_ : List[str] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
a_ : Any = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
a_ : Tuple = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
] , )
a_ : int = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
],
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
],
] , )
@require_torch
@slow
def UpperCAmelCase( self : int ):
a_ : Union[str, Any] = """facebook/detr-resnet-50"""
a_ : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
a_ : Optional[int] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
a_ : Dict = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
a_ : int = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
] , )
a_ : Optional[int] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
],
] , )
@require_torch
@slow
def UpperCAmelCase( self : Any ):
a_ : List[str] = """facebook/detr-resnet-50"""
a_ : Any = pipeline("""object-detection""" , model=__UpperCamelCase )
a_ : List[Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
] , )
a_ : Dict = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
],
] , )
@require_torch
@slow
def UpperCAmelCase( self : List[str] ):
a_ : Tuple = 0.9_9_8_5
a_ : Tuple = """facebook/detr-resnet-50"""
a_ : List[str] = pipeline("""object-detection""" , model=__UpperCamelCase )
a_ : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
] , )
@require_torch
@require_pytesseract
@slow
def UpperCAmelCase( self : Union[str, Any] ):
a_ : int = """Narsil/layoutlmv3-finetuned-funsd"""
a_ : Union[str, Any] = 0.9_9_9_3
a_ : str = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase )
a_ : Optional[int] = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_9_4, """ymin""": 2_5_4, """xmax""": 3_4_3, """ymax""": 2_6_4}},
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_9_4, """ymin""": 2_5_4, """xmax""": 3_4_3, """ymax""": 2_6_4}},
] , )
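# Illustrative usage of the pipeline exercised above (added in this edit; runs
# only with network access to the model hub):
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   for pred in detector("http://images.cocodataset.org/val2017/000000039769.jpg"):
#       print(pred["label"], round(pred["score"], 4), pred["box"])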
| 715 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
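# Illustrative usage (added in this edit, not part of the original file): the
# config derives the backbone stage names from `depths`; a minimal sketch via
# the public API:
#   from transformers import ConvNextV2Config
#   config = ConvNextV2Config(depths=[2, 2, 6, 2], hidden_sizes=[40, 80, 160, 320])
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']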
| 478 | 0 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main() -> None:
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicitly specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicitly specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
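# Illustrative invocation (added in this edit; the script file name is hypothetical):
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2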
| 9 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
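# Worked example (added in this edit): for [1, 2] the recursion above prints
# every subsequence, skipping each element before taking it:
#   generate_all_subsequences([1, 2])
#   # []
#   # [2]
#   # [1]
#   # [1, 2]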
| 304 | 0 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []


def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
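# Sanity check (added in this edit): the published answer to Project Euler
# problem 46, the smallest odd composite not expressible as a prime plus twice
# a square, is 5777, so:
#   >>> solution()
#   5777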
| 470 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
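# Illustrative behaviour (added in this edit): with `_LazyModule`, the heavy
# submodules registered above are only imported on first attribute access, e.g.:
#   from transformers.models.longformer import LongformerConfig  # triggers the lazy import
#   config = LongformerConfig(attention_window=256)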
| 470 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
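# Worked example (added in this edit): for n = 4, x = [0, 1, 2, 3] and first
# y-column [0, 1, 4, 9] (i.e. f(x) = x**2), interpolating at value = 2 gives
# u = (2 - 0) / (1 - 0) = 2, first-row forward differences 1, 2, 0, and
#   summ = 0 + ucal(2, 1) * 1 / 1! + ucal(2, 2) * 2 / 2! + ucal(2, 3) * 0 / 3!
#        = 0 + 2 + 2 + 0 = 4,
# the exact value of f(2).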
| 698 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"
    def __init__(self, vocab_size=3_05_22, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=2, initializer_range=0.0_2, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
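# Illustrative usage (added in this edit, not part of the original file):
#   from transformers.models.bert.configuration_bert import BertConfig, BertOnnxConfig
#   onnx_config = BertOnnxConfig(BertConfig())
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', ...), ('token_type_ids', ...)])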
| 698 | 1 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets,
            ),
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
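# Illustrative usage (added in this edit; requires network access to the hub):
#   tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#   print(tok("hello world")["input_ids"])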
| 102 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            './tests/fixtures/tests_samples/COCO/000000039769.png',
            './tests/fixtures/tests_samples/COCO/000000039769.png',
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png')
        self.assertEqual({'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)}, outputs)
import datasets
        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
        outputs = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
    def test_small_model_tf(self):
pass
@slow
@require_torch
    def test_large_model_pt(self):
        model_id = 'Intel/dpt-large'
        depth_estimator = pipeline('depth-estimation' , model=model_id )
        outputs = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
        outputs['depth'] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
    def test_small_model_pt(self):
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
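# Illustrative usage of the pipeline under test (added in this edit; requires
# network access to the Intel/dpt-large checkpoint):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is the raw tensor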
| 102 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

UpperCamelCase__: Optional[int] = logging.get_logger(__name__)
logger = UpperCamelCase__
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
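# Illustrative usage (added in this edit; requires network access):
#   tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   print(tok("Bonjour le monde")["input_ids"])  # ids wrapped in <s> ... </s> by the methods above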
| 127 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
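# Illustrative usage of the helper under test (added in this edit): it checks
# that every .bin weight in a repo listing has a .safetensors counterpart, e.g.
#   is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])            # False
#   is_safetensors_compatible(["unet/diffusion_pytorch_model.bin",
#                              "unet/diffusion_pytorch_model.safetensors"])    # True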
| 127 | 1 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        # ONNX Runtime expects plain numpy arrays as inputs
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str, None]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
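# Illustrative usage (added in this edit; the local path below is hypothetical):
#   unet = OnnxRuntimeModel.from_pretrained("./stable_diffusion_onnx/unet")
#   noise_pred = unet(sample=latents, timestep=t, encoder_hidden_states=text_embeddings)
# Keyword arguments are converted to numpy arrays and fed to the ONNX session.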
| 356 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""weiweishi/roc-bert-base-zh""": """https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json""",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
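# Illustrative usage (added in this edit, not part of the original file):
#   from transformers import RoCBertConfig
#   config = RoCBertConfig()
#   print(config.enable_shape, config.pronunciation_vocab_size)  # True 910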
| 79 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ['''RANK'''])
    world_size = int(os.environ['''WORLD_SIZE'''])

    parser = ArgumentParser()
    parser.add_argument('''--streaming''', type=bool)
    parser.add_argument('''--local_rank''', type=int)
    parser.add_argument('''--num_workers''', type=int, default=0)
    args = parser.parse_args()

    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {'''shards''': [f'shard_{shard_idx}' for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f'local_size {local_size} != expected_local_size {expected_local_size}')
if __name__ == "__main__":
main()
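# Illustrative launch (added in this edit; the script name is hypothetical):
#   torchrun --nproc_per_node=2 distributed_split_check.py --streaming True
# Each rank then verifies it received its expected_local_size share of the
# NUM_SHARDS * NUM_ITEMS_PER_SHARD examples.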
| 450 | 0 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ctrl': 256,
}
CONTROL_CODES = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            merges = merges_handle.read().split('''\n''')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + '''</w>'''])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '''@@ '''.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r'''\S+\n?''', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(''' ''')))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = ''' '''.join(tokens).replace('''@@ ''', '''''').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])

        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')

        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
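# Illustrative usage (added in this edit): the CONTROL_CODES table above maps each
# generation domain to its vocabulary id; from_pretrained requires network access:
#   tok = CTRLTokenizer.from_pretrained("ctrl")
#   print(tok.control_codes["Diet"])  # 36206, per the table above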
| 710 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 452 | 0 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_12,
"facebook/dpr-ctx_encoder-multiset-base": 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 5_12,
"facebook/dpr-question_encoder-multiset-base": 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 5_12,
"facebook/dpr-reader-multiset-base": 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = (
    R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. "
    R"Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
)


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class _snake_case :
'''simple docstring'''
def __call__( self : List[str] , snake_case : Tuple , snake_case : Optional[str] = None , snake_case : Optional[str] = None , snake_case : Union[bool, str] = False , snake_case : Union[bool, str] = False , snake_case : Optional[int] = None , snake_case : Optional[Union[str, TensorType]] = None , snake_case : Optional[bool] = None , **snake_case : Optional[Any] , ):
if titles is None and texts is None:
return super().__call__(
snake_case , padding=snake_case , truncation=snake_case , max_length=snake_case , return_tensors=snake_case , return_attention_mask=snake_case , **snake_case , )
elif titles is None or texts is None:
UpperCAmelCase_ :int = titles if texts is None else texts
return super().__call__(
snake_case , snake_case , padding=snake_case , truncation=snake_case , max_length=snake_case , return_tensors=snake_case , return_attention_mask=snake_case , **snake_case , )
UpperCAmelCase_ :str = titles if not isinstance(snake_case , snake_case ) else [titles]
UpperCAmelCase_ :str = texts if not isinstance(snake_case , snake_case ) else [texts]
UpperCAmelCase_ :Union[str, Any] = len(snake_case )
UpperCAmelCase_ :Tuple = questions if not isinstance(snake_case , snake_case ) else [questions] * n_passages
assert len(snake_case ) == len(
        snake_case ), f'There should be as many titles as texts but got {len(snake_case )} titles and {len(snake_case )} texts.'
UpperCAmelCase_ :Optional[int] = super().__call__(snake_case , snake_case , padding=snake_case , truncation=snake_case )['''input_ids''']
UpperCAmelCase_ :List[Any] = super().__call__(snake_case , add_special_tokens=snake_case , padding=snake_case , truncation=snake_case )['''input_ids''']
UpperCAmelCase_ :Tuple = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(snake_case , snake_case )
]
}
if return_attention_mask is not False:
UpperCAmelCase_ :Optional[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCAmelCase_ :Union[str, Any] = attention_mask
return self.pad(snake_case , padding=snake_case , max_length=snake_case , return_tensors=snake_case )
def snake_case_ ( self : int , snake_case : BatchEncoding , snake_case : DPRReaderOutput , snake_case : int = 16 , snake_case : int = 64 , snake_case : int = 4 , ):
UpperCAmelCase_ :str = reader_input['''input_ids''']
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ :Optional[Any] = reader_output[:3]
UpperCAmelCase_ :Optional[int] = len(snake_case )
UpperCAmelCase_ :Tuple = sorted(range(snake_case ) , reverse=snake_case , key=relevance_logits.__getitem__ )
UpperCAmelCase_ :List[DPRReaderOutput] = []
for doc_id in sorted_docs:
UpperCAmelCase_ :List[str] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCAmelCase_ :Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCAmelCase_ :Dict = sequence_ids.index(self.pad_token_id )
else:
UpperCAmelCase_ :Tuple = len(snake_case )
UpperCAmelCase_ :str = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=snake_case , top_spans=snake_case , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=snake_case , start_index=snake_case , end_index=snake_case , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(snake_case ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def snake_case_ ( self : Dict , snake_case : List[int] , snake_case : List[int] , snake_case : int , snake_case : int , ):
UpperCAmelCase_ :Optional[int] = []
for start_index, start_score in enumerate(snake_case ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        UpperCAmelCase_ :Optional[int] = sorted(snake_case , key=lambda x : x[1] , reverse=snake_case )
UpperCAmelCase_ :Optional[int] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
UpperCAmelCase_ :Optional[int] = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(snake_case ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(A__ )
class _snake_case ( A__ , A__ ):
'''simple docstring'''
UpperCamelCase__ =VOCAB_FILES_NAMES
UpperCamelCase__ =READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ =READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ =["""input_ids""", """attention_mask"""]
UpperCamelCase__ =DPRReaderTokenizer
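
# Illustrative, self-contained sketch of the span-selection logic implemented
# above, with readable names. `demo_best_spans` is an assumption for this
# example and is not part of the tokenizer class; it mirrors the scoring and
# containment-overlap checks of the best-spans method.
def demo_best_spans(start_logits, end_logits, max_answer_length, top_spans):
    # Score every candidate span no longer than max_answer_length.
    scores = []
    for start, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), start_score + end_score))
    scores = sorted(scores, key=lambda x: x[1], reverse=True)
    chosen = []
    for (start, end), _score in scores:
        # Skip any span contained in (or containing) one we already kept.
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

assert demo_best_spans([0.1, 0.9, 0.2], [0.2, 0.1, 0.8], max_answer_length=2, top_spans=1) == [(1, 2)]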
| 608 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class _snake_case :
'''simple docstring'''
def __init__( self : Dict , snake_case : int , snake_case : MutableSequence[float] ):
if len(snake_case ) != degree + 1:
raise ValueError(
'''The number of coefficients should be equal to the degree + 1.''' )
UpperCAmelCase_ :list[float] = list(snake_case )
UpperCAmelCase_ :str = degree
def __add__( self : Any , snake_case : Polynomial ):
if self.degree > polynomial_a.degree:
UpperCAmelCase_ :int = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , snake_case )
else:
UpperCAmelCase_ :Any = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , snake_case )
def __sub__( self : List[str] , snake_case : Polynomial ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Union[str, Any] ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Any , snake_case : Polynomial ):
UpperCAmelCase_ :list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , snake_case )
def snake_case_ ( self : Optional[Any] , snake_case : int | float ):
UpperCAmelCase_ :int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : List[Any] ):
UpperCAmelCase_ :List[str] = ''''''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(snake_case )
return polynomial
def __repr__( self : int ):
return self.__str__()
def snake_case_ ( self : str ):
UpperCAmelCase_ :list[float] = [0] * self.degree
for i in range(self.degree ):
UpperCAmelCase_ :str = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , snake_case )
def snake_case_ ( self : Optional[int] , snake_case : int | float = 0 ):
UpperCAmelCase_ :list[float] = [0] * (self.degree + 2)
UpperCAmelCase_ :List[str] = constant
for i in range(self.degree + 1 ):
UpperCAmelCase_ :Optional[int] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , snake_case )
def __eq__( self : int , snake_case : object ):
if not isinstance(snake_case , snake_case ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : Optional[Any] , snake_case : object ):
return not self.__eq__(snake_case )
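
# A minimal, self-contained restatement of the evaluation and derivative rules
# above with readable names. This is an illustration, not the class itself
# (several of its methods share one mangled name and shadow each other).
def evaluate(coefficients, x):
    # Sum of c_i * x**i, with coefficients stored lowest degree first.
    return sum(c * x**i for i, c in enumerate(coefficients))

def derivative(coefficients):
    # d/dx of c_i * x**i is i * c_i * x**(i - 1).
    return [coefficients[i] * i for i in range(1, len(coefficients))]

assert evaluate([5, 0, 3], 2) == 17     # 3x^2 + 5 at x = 2
assert derivative([5, 0, 3]) == [0, 6]  # -> 6x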
| 608 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class a_ ( lowerCamelCase ):
lowercase = """mra"""
def __init__( self , _SCREAMING_SNAKE_CASE=50265 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="full" , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , **_SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = type_vocab_size
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = block_per_row
UpperCamelCase = approx_mode
UpperCamelCase = initial_prior_first_n_blocks
UpperCamelCase = initial_prior_diagonal_n_blocks
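
# Illustrative sketch of the config pattern above: constructor keyword
# arguments become attributes that can round-trip through a plain dict.
# `TinyConfig` is an assumption for this example, not part of transformers.
class TinyConfig:
    def __init__(self, vocab_size=50265, hidden_size=768, approx_mode="full"):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.approx_mode = approx_mode

    def to_dict(self):
        return dict(self.__dict__)

cfg = TinyConfig(hidden_size=512)
assert TinyConfig(**cfg.to_dict()).hidden_size == 512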
| 707 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-1'
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-2'
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-3'
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-4'
class a_ ( lowerCamelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , ) -> Any:
"""simple docstring"""
        super().__init__()
UpperCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = StableDiffusionPipeline(
vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , requires_safety_checker=_SCREAMING_SNAKE_CASE , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def A__ ( self ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , _SCREAMING_SNAKE_CASE ) for k in self.config.keys() if not k.startswith("""_""" )}
def A__ ( self , _SCREAMING_SNAKE_CASE = "auto" ) -> Optional[Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Tuple:
"""simple docstring"""
self.enable_attention_slicing(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> Dict:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
UpperCamelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(_SCREAMING_SNAKE_CASE )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
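
# Small sketch of the size validation used in the pipeline above;
# `check_latent_size` is an assumption for this example. Stable Diffusion's
# VAE downsamples by a factor of 8, so height and width must be multiples of 8.
def check_latent_size(height: int, width: int) -> None:
    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

check_latent_size(512, 512)      # fine
try:
    check_latent_size(500, 512)  # raises
except ValueError:
    pass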
| 35 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
def A ( self ) -> List[Any]:
"""simple docstring"""
a_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=UpperCamelCase_ ).to(UpperCamelCase_ )
a_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" )
a_ : int = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
a_ : Optional[int] = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
a_ : int = model(input_ids.to(UpperCamelCase_ ) , labels=labels.to(UpperCamelCase_ ) ).loss
a_ : Optional[Any] = -(labels.shape[-1] * loss.item())
a_ : Optional[Any] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
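
# Sketch of the score computed in the test above: the model returns the mean
# per-token cross-entropy, so multiplying by the label length recovers the
# summed negative log-likelihood of the target sequence (numbers are made up).
seq_len = 5
mean_loss = 16.98254
summed_nll = -(seq_len * mean_loss)
assert abs(summed_nll - -84.9127) < 1e-4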
| 419 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class snake_case__ ( __A ):
def A ( self ) -> int:
"""simple docstring"""
a_ : Optional[Any] = tempfile.mkdtemp()
a_ : Union[str, Any] = 8
# DPR tok
a_ : Dict = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
a_ : List[Any] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
a_ : List[Any] = os.path.join(UpperCamelCase_ , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
a_ : str = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
a_ : List[Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
a_ : Dict = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
a_ : Any = {"""unk_token""": """<unk>"""}
a_ : List[str] = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
a_ : List[str] = os.path.join(UpperCamelCase_ , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
a_ : Union[str, Any] = os.path.join(UpperCamelCase_ , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase_ ) )
def A ( self ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def A ( self ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def A ( self ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def A ( self ) -> Optional[int]:
"""simple docstring"""
a_ : Tuple = os.path.join(self.tmpdirname , """rag_tokenizer""" )
a_ : Any = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
a_ : Optional[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(UpperCamelCase_ )
rag_tokenizer.save_pretrained(UpperCamelCase_ )
a_ : Union[str, Any] = RagTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , UpperCamelCase_ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , UpperCamelCase_ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def A ( self ) -> Dict:
"""simple docstring"""
a_ : List[str] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
a_ : int = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
a_ : Optional[int] = tokenizer(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@slow
def A ( self ) -> List[Any]:
"""simple docstring"""
a_ : int = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
a_ : Any = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
a_ : Tuple = tokenizer(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
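
# Minimal sketch of the vocab fixture written in setUp above: a WordPiece
# vocab.txt is one token per line, and the line number becomes the token id.
import os
import tempfile

tokens = ["[UNK]", "[CLS]", "[SEP]", "low", "##est"]
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "vocab.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write("".join(t + "\n" for t in tokens))
    with open(path, encoding="utf-8") as f:
        vocab = {t.rstrip("\n"): i for i, t in enumerate(f)}
assert vocab["[SEP]"] == 2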
| 419 | 1 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=5_1_2,
type=int,
help=(
        'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Dict:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"could not parse string as bool {string}" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
snake_case_ = parser.parse_args()
snake_case_ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
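
# Sketch of the `type=parse_bool` pattern used above: argparse passes each raw
# string to the given callable, so "True"/"False" become real booleans instead
# of always-truthy strings. The function is redefined locally so this example
# is self-contained.
from argparse import ArgumentParser

def parse_bool(string: str) -> bool:
    if string == "True":
        return True
    if string == "False":
        return False
    raise ValueError(f"could not parse string as bool {string}")

demo = ArgumentParser()
demo.add_argument("--use_linear_projection", type=parse_bool)
assert demo.parse_args(["--use_linear_projection", "False"]).use_linear_projection is False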
| 68 |
'''simple docstring'''
def or_gate ( input_a : int , input_b : int ) -> int:
    """simple docstring"""
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate ( ) -> None:
"""simple docstring"""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
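
# The tuple-count trick above generalizes to the other basic gates; these
# companions are an illustration, not part of the original module.
def and_gate(input_a: int, input_b: int) -> int:
    # AND is true only when no input is 0.
    return int((input_a, input_b).count(0) == 0)

def nor_gate(input_a: int, input_b: int) -> int:
    # NOR is the negation of OR: true only when every input is 0.
    return int((input_a, input_b).count(0) == 2)

assert and_gate(1, 1) == 1 and and_gate(1, 0) == 0
assert nor_gate(0, 0) == 1 and nor_gate(1, 0) == 0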
| 68 | 1 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
def __init__( self :List[str] ,__lowercase :Dict ,__lowercase :Optional[Any]=1_3 ,__lowercase :str=7 ,__lowercase :List[Any]=True ,__lowercase :Optional[int]=True ,__lowercase :Optional[int]=True ,__lowercase :int=True ,__lowercase :Union[str, Any]=9_9 ,__lowercase :Union[str, Any]=3_2 ,__lowercase :Any=5 ,__lowercase :Any=4 ,__lowercase :Optional[int]=3_7 ,__lowercase :Union[str, Any]="gelu" ,__lowercase :int=0.1 ,__lowercase :Any=0.1 ,__lowercase :str=5_1_2 ,__lowercase :Union[str, Any]=1_6 ,__lowercase :Dict=2 ,__lowercase :Tuple=0.02 ,__lowercase :List[Any]=3 ,__lowercase :Union[str, Any]=4 ,__lowercase :Tuple=None ,):
snake_case__ : str = parent
snake_case__ : List[Any] = batch_size
snake_case__ : Optional[Any] = seq_length
snake_case__ : str = is_training
snake_case__ : int = use_input_mask
snake_case__ : Optional[Any] = use_token_type_ids
snake_case__ : Any = use_labels
snake_case__ : Optional[Any] = vocab_size
snake_case__ : List[Any] = hidden_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : List[str] = hidden_act
snake_case__ : Dict = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : int = max_position_embeddings
snake_case__ : Any = type_vocab_size
snake_case__ : List[Any] = type_sequence_label_size
snake_case__ : Dict = initializer_range
snake_case__ : Optional[Any] = num_labels
snake_case__ : Tuple = num_choices
snake_case__ : int = scope
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case__ : str = None
if self.use_input_mask:
snake_case__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : List[Any] = None
if self.use_token_type_ids:
snake_case__ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
snake_case__ : List[str] = None
snake_case__ : str = None
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case__ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
snake_case__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self :str ):
return NystromformerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowercase ,initializer_range=self.initializer_range ,)
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :List[str] ,__lowercase :Optional[Any] ,__lowercase :str ,__lowercase :Any ,__lowercase :Optional[int] ,__lowercase :Union[str, Any] ,__lowercase :List[Any] ):
snake_case__ : Optional[int] = NystromformerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Dict = model(__lowercase ,attention_mask=__lowercase ,token_type_ids=__lowercase )
snake_case__ : Optional[Any] = model(__lowercase ,token_type_ids=__lowercase )
snake_case__ : Any = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self :Any ,__lowercase :List[Any] ,__lowercase :int ,__lowercase :List[str] ,__lowercase :int ,__lowercase :int ,__lowercase :int ,__lowercase :List[Any] ):
snake_case__ : Dict = NystromformerForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Tuple = model(__lowercase ,attention_mask=__lowercase ,token_type_ids=__lowercase ,labels=__lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self :Tuple ,__lowercase :List[str] ,__lowercase :Optional[int] ,__lowercase :str ,__lowercase :Union[str, Any] ,__lowercase :List[Any] ,__lowercase :List[Any] ,__lowercase :List[str] ):
snake_case__ : Optional[Any] = NystromformerForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : List[Any] = model(
__lowercase ,attention_mask=__lowercase ,token_type_ids=__lowercase ,start_positions=__lowercase ,end_positions=__lowercase ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowerCamelCase ( self :List[str] ,__lowercase :Union[str, Any] ,__lowercase :Optional[int] ,__lowercase :Any ,__lowercase :Union[str, Any] ,__lowercase :Optional[int] ,__lowercase :Optional[Any] ,__lowercase :List[Any] ):
snake_case__ : List[str] = self.num_labels
snake_case__ : Any = NystromformerForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : List[Any] = model(__lowercase ,attention_mask=__lowercase ,token_type_ids=__lowercase ,labels=__lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :List[str] ,__lowercase :Optional[int] ,__lowercase :List[Any] ,__lowercase :Optional[Any] ,__lowercase :Union[str, Any] ,__lowercase :Optional[Any] ,__lowercase :List[str] ):
snake_case__ : Any = self.num_labels
snake_case__ : List[Any] = NystromformerForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : List[Any] = model(__lowercase ,attention_mask=__lowercase ,token_type_ids=__lowercase ,labels=__lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Union[str, Any] ,__lowercase :Optional[int] ,__lowercase :List[str] ,__lowercase :Optional[int] ,__lowercase :int ,__lowercase :Optional[int] ,__lowercase :int ):
snake_case__ : Any = self.num_choices
snake_case__ : Dict = NystromformerForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
snake_case__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
snake_case__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
snake_case__ : Dict = model(
__lowercase ,attention_mask=__lowercase ,token_type_ids=__lowercase ,labels=__lowercase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : int = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) : List[str] = config_and_inputs
snake_case__ : Union[str, Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : str = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__lowerCAmelCase : List[str] = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : List[Any] = False
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Optional[Any] = NystromformerModelTester(self )
snake_case__ : List[str] = ConfigTester(self ,config_class=__lowercase ,hidden_size=3_7 )
def __lowerCamelCase ( self :Optional[Any] ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self :Any ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case__ : int = type
self.model_tester.create_and_check_model(*__lowercase )
def __lowerCamelCase ( self :Any ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowercase )
def __lowerCamelCase ( self :Dict ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowercase )
def __lowerCamelCase ( self :int ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowercase )
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowercase )
@slow
def __lowerCamelCase ( self :List[str] ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Dict = NystromformerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
@require_torch
class a ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self :str ):
snake_case__ : int = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' )
snake_case__ : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
snake_case__ : List[str] = model(__lowercase )[0]
snake_case__ : int = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape ,__lowercase )
snake_case__ : List[Any] = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__lowercase ,atol=1e-4 ) )
@slow
def __lowerCamelCase ( self :int ):
snake_case__ : Union[str, Any] = '''the [MASK] of Belgium is Brussels'''
snake_case__ : List[Any] = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' )
snake_case__ : Union[str, Any] = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' )
snake_case__ : Any = tokenizer(__lowercase ,return_tensors='''pt''' )
with torch.no_grad():
snake_case__ : int = model(encoding.input_ids ).logits
snake_case__ : List[str] = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(__lowercase ) ,'''capital''' )
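
# Sketch of the multiple-choice input expansion used in the tester above:
# a (batch, seq) tensor is repeated once per answer choice to produce
# (batch, num_choices, seq). Assumes torch is installed.
import torch

batch, seq, num_choices = 2, 6, 4
ids = torch.arange(batch * seq).reshape(batch, seq)
expanded = ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert expanded.shape == (batch, num_choices, seq)
assert torch.equal(expanded[:, 0], ids)  # every choice sees the same tokens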
| 252 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
A__ = logging.get_logger(__name__)
A__ = {'''vocab_file''': '''vocab.txt'''}
A__ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
A__ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
A__ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class a ( __lowerCamelCase ):
__lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
__lowerCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[Any] = ConvBertTokenizer
def __init__( self :Any ,__lowercase :Optional[int]=None ,__lowercase :str=None ,__lowercase :Union[str, Any]=True ,__lowercase :Dict="[UNK]" ,__lowercase :List[Any]="[SEP]" ,__lowercase :int="[PAD]" ,__lowercase :Union[str, Any]="[CLS]" ,__lowercase :List[str]="[MASK]" ,__lowercase :List[Any]=True ,__lowercase :List[str]=None ,**__lowercase :List[str] ,):
super().__init__(
__lowercase ,tokenizer_file=__lowercase ,do_lower_case=__lowercase ,unk_token=__lowercase ,sep_token=__lowercase ,pad_token=__lowercase ,cls_token=__lowercase ,mask_token=__lowercase ,tokenize_chinese_chars=__lowercase ,strip_accents=__lowercase ,**__lowercase ,)
snake_case__ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' ,__lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' ,__lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' ,__lowercase ) != tokenize_chinese_chars
):
snake_case__ : Union[str, Any] = getattr(__lowercase ,normalizer_state.pop('''type''' ) )
snake_case__ : int = do_lower_case
snake_case__ : Union[str, Any] = strip_accents
snake_case__ : List[str] = tokenize_chinese_chars
snake_case__ : Tuple = normalizer_class(**__lowercase )
snake_case__ : Any = do_lower_case
def __lowerCamelCase ( self :int ,__lowercase :Union[str, Any] ,__lowercase :List[Any]=None ):
snake_case__ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self :List[Any] ,__lowercase :List[int] ,__lowercase :Optional[List[int]] = None ):
snake_case__ : str = [self.sep_token_id]
snake_case__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :str ,__lowercase :Optional[str] = None ):
snake_case__ : Optional[int] = self._tokenizer.model.save(__lowercase ,name=__lowercase )
return tuple(__lowercase )
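
# Plain-Python sketch of the segment-id rule implemented above: the leading
# [CLS], the first sequence, and its [SEP] get type 0; a second sequence and
# its [SEP] get type 1. `demo_token_type_ids` is an assumption for this example.
def demo_token_type_ids(len_a, len_b=None):
    first = [0] * (1 + len_a + 1)      # [CLS] tokens_a [SEP]
    if len_b is None:
        return first
    return first + [1] * (len_b + 1)   # tokens_b [SEP]

assert demo_token_type_ids(3) == [0, 0, 0, 0, 0]
assert demo_token_type_ids(2, 2) == [0, 0, 0, 0, 1, 1, 1]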
| 252 | 1 |
'''simple docstring'''
import heapq
import sys
import numpy as np
lowerCamelCase_ : List[str] = tuple[int, int]
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = set()
def A ( self : int ) -> List[Any]:
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def A ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.elements ) == 0
def A ( self : str , lowercase : Union[str, Any] , lowercase : Optional[Any] ) -> Dict:
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_lowerCAmelCase )
else:
# update
# print("update", item)
UpperCamelCase__ = []
((UpperCamelCase__) , (UpperCamelCase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((UpperCamelCase__) , (UpperCamelCase__)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def A ( self : List[str] , lowercase : Optional[Any] ) -> Dict:
'''simple docstring'''
if item in self.set:
self.set.remove(_lowerCAmelCase )
UpperCamelCase__ = []
((UpperCamelCase__) , (UpperCamelCase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((UpperCamelCase__) , (UpperCamelCase__)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def A ( self : Any ) -> List[Any]:
'''simple docstring'''
return self.elements[0][1]
def A ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
((UpperCamelCase__) , (UpperCamelCase__)) = heapq.heappop(self.elements )
self.set.remove(_lowerCAmelCase )
return (priority, item)
def __magic_name__( _A , _A ):
'''simple docstring'''
UpperCamelCase__ = np.array(_A )
UpperCamelCase__ = np.array(_A )
return np.linalg.norm(a - b )
def __magic_name__( _A , _A ):
'''simple docstring'''
return consistent_heuristic(_A , _A ) // t
def __magic_name__( _A , _A ):
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __magic_name__( _A , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase__ = g_function[start] + Wa * heuristics[i](_A , _A )
return ans
def __magic_name__( _A , _A , _A ):
'''simple docstring'''
UpperCamelCase__ = np.chararray((n, n) )
for i in range(_A ):
for j in range(_A ):
UpperCamelCase__ = """*"""
for i in range(_A ):
for j in range(_A ):
if (j, (n - 1) - i) in blocks:
UpperCamelCase__ = """#"""
UpperCamelCase__ = """-"""
UpperCamelCase__ = back_pointer[goal]
while x != start:
((UpperCamelCase__) , (UpperCamelCase__)) = x
# print(x)
UpperCamelCase__ = """-"""
UpperCamelCase__ = back_pointer[x]
UpperCamelCase__ = """-"""
for i in range(_A ):
for j in range(_A ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
UpperCamelCase__ = back_pointer[goal]
while x != start:
print(_A , end=""" """ )
UpperCamelCase__ = back_pointer[x]
print(_A )
sys.exit()
def __magic_name__( _A ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __magic_name__( _A , _A , _A , _A , _A , _A , _A , _A , ):
'''simple docstring'''
for itera in range(_A ):
open_list[itera].remove_element(_A )
# print("s", s)
# print("j", j)
((UpperCamelCase__) , (UpperCamelCase__)) = s
UpperCamelCase__ = (x - 1, y)
UpperCamelCase__ = (x + 1, y)
UpperCamelCase__ = (x, y + 1)
UpperCamelCase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_A ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_A )
UpperCamelCase__ = -1
UpperCamelCase__ = float("""inf""" )
if valid(_A ) and g_function[neighbours] > g_function[s] + 1:
UpperCamelCase__ = g_function[s] + 1
UpperCamelCase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(_A , key(_A , 0 , _A , _A ) )
if neighbours not in close_list_inad:
for var in range(1 , _A ):
if key(_A , _A , _A , _A ) <= Wa * key(
_A , 0 , _A , _A ):
open_list[j].put(
_A , key(_A , _A , _A , _A ) )
def __magic_name__( ):
'''simple docstring'''
UpperCamelCase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
lowerCamelCase_ : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
lowerCamelCase_ : Optional[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
lowerCamelCase_ : Optional[Any] = make_common_ground()
lowerCamelCase_ : Dict = blocks_blk
# hyper parameters
lowerCamelCase_ : Union[str, Any] = 1
lowerCamelCase_ : Union[str, Any] = 1
lowerCamelCase_ : Optional[int] = 20
lowerCamelCase_ : List[str] = 3 # one consistent and two other inconsistent
# start and end destination
lowerCamelCase_ : str = (0, 0)
lowerCamelCase_ : str = (n - 1, n - 1)
lowerCamelCase_ : Optional[Any] = 1
def __magic_name__( _A , _A , _A ):
'''simple docstring'''
UpperCamelCase__ = {start: 0, goal: float("""inf""" )}
UpperCamelCase__ = {start: -1, goal: -1}
UpperCamelCase__ = []
UpperCamelCase__ = set()
for i in range(_A ):
open_list.append(PriorityQueue() )
open_list[i].put(_A , key(_A , _A , _A , _A ) )
UpperCamelCase__ = []
UpperCamelCase__ = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , _A ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(_A , _A , _A )
else:
UpperCamelCase__ , UpperCamelCase__ = open_list[i].top_show()
visited.add(_A )
expand_state(
_A , _A , _A , _A , _A , _A , _A , _A , )
close_list_inad.append(_A )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(_A , _A , _A )
else:
UpperCamelCase__ = open_list[0].top_show()
visited.add(_A )
expand_state(
_A , 0 , _A , _A , _A , _A , _A , _A , )
close_list_anchor.append(_A )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_A ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
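
# Sketch of the priority key used by the search above: each open list is
# ordered by f = g + W1 * h, and an inadmissible queue is only expanded while
# its best key stays within a factor W2 of the anchor queue's best key.
# `demo_key` and the numbers below are assumptions for this example.
def demo_key(g: float, h: float, w1: float = 1.0) -> float:
    return g + w1 * h

anchor_best = demo_key(g=4.0, h=3.0)        # 7.0 from the consistent heuristic
inad_best = demo_key(g=5.0, h=2.5)          # 7.5 from an inconsistent one
W2 = 1.0
assert not inad_best <= W2 * anchor_best    # so the anchor queue expands first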
| 709 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _SCREAMING_SNAKE_CASE ( yaml.SafeLoader ):
'''simple docstring'''
def A ( self : List[str] , lowercase : List[Any] ) -> int:
'''simple docstring'''
UpperCamelCase__ = [self.constructed_objects[key_node] for key_node, _ in node.value]
UpperCamelCase__ = [tuple(lowercase ) if isinstance(lowercase , lowercase ) else key for key in keys]
UpperCamelCase__ = Counter(lowercase )
UpperCamelCase__ = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}" )
def A ( self : List[str] , lowercase : int , lowercase : str=False ) -> Any:
'''simple docstring'''
UpperCamelCase__ = super().construct_mapping(lowercase , deep=lowercase )
self._check_no_duplicates_on_constructed_node(lowercase )
return mapping
def __magic_name__( _A ):
'''simple docstring'''
UpperCamelCase__ = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
UpperCamelCase__ = full_content[1:].index("""---""" ) + 1
UpperCamelCase__ = """\n""".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(_A )
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__a : Tuple = {"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def A ( cls : int , lowercase : Path ) -> "DatasetMetadata":
'''simple docstring'''
with open(lowercase , encoding="""utf-8""" ) as readme_file:
UpperCamelCase__ , UpperCamelCase__ = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(lowercase )
else:
return cls()
def A ( self : int , lowercase : Path ) -> Dict:
'''simple docstring'''
if path.exists():
with open(lowercase , encoding="""utf-8""" ) as readme_file:
UpperCamelCase__ = readme_file.read()
else:
UpperCamelCase__ = None
UpperCamelCase__ = self._to_readme(lowercase )
with open(lowercase , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(lowercase )
    def _to_readme ( self , readme_content : Optional[str] = None ) -> str:
        '''simple docstring'''
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = """---\n""" + self.to_yaml_string() + """---\n""" + content
        else:
            full_content = """---\n""" + self.to_yaml_string() + """---\n"""
        return full_content
@classmethod
    def from_yaml_string ( cls , string : str ) -> "DatasetMetadata":
        '''simple docstring'''
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
        return cls(**metadata_dict )
    def to_yaml_string ( self ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding="""utf-8""" , ).decode("""utf-8""" )
lowerCamelCase_ : str = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
    ap.add_argument('''readme_filepath''')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 265 | 0 |
from __future__ import annotations
def depth_first_search( possible_board , diagonal_right_collisions , diagonal_left_collisions , boards , n , ) -> None:
    row = len(possible_board )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
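# Quick check (added for illustration) of the two diagonal invariants the
# comments above rely on: cells on the same "\" diagonal share row - col,
# and cells on the same "/" diagonal share row + col.
assert 0 - 1 == 2 - 3  # (0, 1) and (2, 3) collide on a "\" diagonal
assert 0 + 3 == 2 + 1  # (0, 3) and (2, 1) collide on a "/" diagonal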
def n_queens_solution( n ) -> None:
    boards = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print('''''' )
    print(len(boards ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 33 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : int = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
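# Illustrative toy check (added; not part of the original script): timm keeps
# one fused (3 * hidden, hidden) qkv matrix, and the slices above are simply
# the stacked query, key and value blocks. With a pretend hidden_size of 2:
_demo_qkv = torch.arange(12.0).reshape(6, 2)  # fused q, k, v of shape (2, 2) each
_demo_q, _demo_k, _demo_v = _demo_qkv[:2, :], _demo_qkv[2:4, :], _demo_qkv[4:, :]
assert torch.equal(torch.cat([_demo_q, _demo_k, _demo_v]), _demo_qkv)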
def remove_classification_head_( state_dict ):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path ):
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 1000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
        if vit_name[9:].startswith('''tiny''' ):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('''small''' ):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('''small''' ):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('''base''' ):
            pass
        elif vit_name[4:].startswith('''large''' ):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('''huge''' ):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
# load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
# load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
snake_case__ = DeiTImageProcessor(size=config.image_size )
else:
snake_case__ = ViTImageProcessor(size=config.image_size )
snake_case__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
snake_case__ = encoding['''pixel_values''']
snake_case__ = model(__lowerCAmelCase )
if base_model:
snake_case__ = timm_model.forward_features(__lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__lowerCAmelCase , outputs.pooler_output , atol=1e-3 )
else:
snake_case__ = timm_model(__lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1e-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 33 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path ( suffix="" ):
    directory = tempfile.mkdtemp()
    return os.path.join(directory , str(uuid.uuid4() ) + suffix )
@require_soundfile
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = torch.rand(12 , dtype=torch.floataa ) - 0.5
__a : str = AgentAudio(_lowercase )
__a : Optional[int] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_lowercase , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(_lowercase ) )
# Ensure that the file contains the same value as the original tensor
__a , __a : str = sf.read(_lowercase )
self.assertTrue(torch.allclose(_lowercase , torch.tensor(_lowercase ) , atol=1e-4 ) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[int] = torch.rand(12 , dtype=torch.floataa ) - 0.5
__a : Optional[int] = get_new_path(suffix=""".wav""" )
sf.write(_lowercase , _lowercase , 16000 )
__a : Union[str, Any] = AgentAudio(_lowercase )
self.assertTrue(torch.allclose(_lowercase , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , _lowercase )
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Dict = torch.randint(0 , 256 , (64, 64, 3) )
__a : Dict = AgentImage(_lowercase )
__a : Union[str, Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_lowercase , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowercase ) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[str] = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
__a : List[str] = Image.open(_lowercase )
__a : Optional[int] = AgentImage(_lowercase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowercase ) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
__a : List[str] = Image.open(_lowercase )
__a : str = AgentImage(_lowercase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowercase ) )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = """Hey!"""
__a : int = AgentText(_lowercase )
self.assertEqual(_lowercase , agent_type.to_string() )
self.assertEqual(_lowercase , agent_type.to_raw() )
self.assertEqual(_lowercase , _lowercase )
| 63 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
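# `nested_simplify` (imported above) rounds floats inside nested pipeline
# outputs so they can be compared against short literals such as 0.504. As an
# illustration only, a simplified stand-in could look like this hypothetical
# helper (the real implementation lives in transformers.testing_utils):
def _example_nested_simplify(obj, decimals=3):
    # Recurse through lists and dicts, rounding every float encountered.
    if isinstance(obj, float):
        return round(obj, decimals)
    if isinstance(obj, list):
        return [_example_nested_simplify(x, decimals) for x in obj]
    if isinstance(obj, dict):
        return {k: _example_nested_simplify(v, decimals) for k, v in obj.items()}
    return obj

assert _example_nested_simplify([{"score": 0.50421}]) == [{"score": 0.504}]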
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
_lowerCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_lowerCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_lowerCAmelCase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
__a : Tuple = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
__a : Optional[Any] = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
__a : int = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
__a : List[str] = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
# Legacy behavior
__a : Optional[int] = text_classifier("""This is great !""" , return_all_scores=_lowercase )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
__a : Tuple = text_classifier("""This is great !""" , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
__a : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
__a : Union[str, Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""label""": """LABEL_0""", """score""": 0.504},
{"""label""": """LABEL_0""", """score""": 0.504},
] , )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
import torch
__a : Any = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
__a : Optional[int] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
__a : List[str] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@slow
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = pipeline("""text-classification""" )
__a : Tuple = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__a : Optional[int] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__a : Union[str, Any] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
@slow
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[str] = pipeline("""text-classification""" , framework="""tf""" )
__a : str = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__a : Tuple = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__a : str = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
    def get_test_pipeline(self , model , tokenizer , processor ):
        '''simple docstring'''
        text_classifier = TextClassificationPipeline(model=model , tokenizer=tokenizer )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test(self , text_classifier , _ ):
        '''simple docstring'''
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = """HuggingFace is in"""
        outputs = text_classifier(valid_inputs )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}] )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
        valid_inputs = ["""HuggingFace is in """, """Paris is in France"""]
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}, {"""label""": ANY(str ), """score""": ANY(float )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
        self.assertTrue(outputs[1]["""label"""] in model.config.id2label.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs , top_k=None )
        N = len(model.config.id2label.values() )
        self.assertEqual(
            nested_simplify(outputs ) , [[{"""label""": ANY(str ), """score""": ANY(float )}] * N, [{"""label""": ANY(str ), """score""": ANY(float )}] * N] , )
        valid_inputs = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , {"""label""": ANY(str ), """score""": ANY(float )} , )
        self.assertTrue(outputs["""label"""] in model.config.id2label.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_inputs = [["""HuggingFace is in """, """Paris is in France"""]]
        with self.assertRaises(ValueError ):
            text_classifier(invalid_inputs )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
        self.assertEqual(
            nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
| 63 | 1 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
class IFSafetyChecker ( PreTrainedModel ):
    config_class = CLIPConfig
    _no_split_modules = ['CLIPEncoderLayer']
    def __init__( self , config : CLIPConfig ):
        """simple docstring"""
        super().__init__(config )
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config )
        self.p_head = nn.Linear(config.vision_config.projection_dim , 1 )
        self.w_head = nn.Linear(config.vision_config.projection_dim , 1 )
    @torch.no_grad()
    def forward( self , clip_input , images , p_threshold=0.5 , w_threshold=0.5 ):
        """simple docstring"""
        image_embeds = self.vision_model(clip_input )[0]
        nsfw_detected = self.p_head(image_embeds )
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
if any(UpperCAmelCase__ ):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, nsfw_detected_ in enumerate(UpperCAmelCase__ ):
if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape )
        watermark_detected = self.w_head(image_embeds )
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
if any(UpperCAmelCase__ ):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, watermark_detected_ in enumerate(UpperCAmelCase__ ):
if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
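# Minimal sketch (added for illustration) of the thresholding step above,
# using plain tensors instead of model heads: raw scores are flattened,
# compared against the threshold, and flagged images are blacked out.
_demo_scores = torch.tensor([[0.2], [0.9]]).flatten()
_demo_flagged = (_demo_scores > 0.5).tolist()
_demo_images = np.ones((2, 4, 4, 3))
for _demo_idx, _demo_bad in enumerate(_demo_flagged):
    if _demo_bad:
        _demo_images[_demo_idx] = np.zeros(_demo_images[_demo_idx].shape)
assert _demo_flagged == [False, True] and _demo_images[1].max() == 0.0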
| 598 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class a_ ( unittest.TestCase ):
A__ : Optional[int] = StableDiffusionLDMaDPipeline
A__ : int = TEXT_TO_IMAGE_PARAMS
A__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
A__ : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase( self : int ):
"""simple docstring"""
snake_case : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case : List[str] = self.get_dummy_components()
snake_case : Optional[int] = StableDiffusionLDMaDPipeline(**UpperCAmelCase__ )
snake_case : Union[str, Any] = ldmad_pipe.to(UpperCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
snake_case : Any = self.get_dummy_inputs(UpperCAmelCase__ )
snake_case : List[Any] = ldmad_pipe(**UpperCAmelCase__ )
snake_case , snake_case : Optional[int] = output.rgb, output.depth
snake_case : Union[str, Any] = rgb[0, -3:, -3:, -1]
snake_case : Dict = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
snake_case : List[Any] = np.array(
[0.3733_8176, 0.7_0247, 0.7420_3193, 0.5164_3604, 0.5825_6793, 0.6093_2136, 0.418_1095, 0.4835_5877, 0.4653_5262] )
snake_case : Union[str, Any] = np.array([103.4_6727, 85.81_2004, 87.84_9236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def lowerCAmelCase( self : Any ):
"""simple docstring"""
snake_case : int = self.get_dummy_components()
snake_case : Optional[int] = StableDiffusionLDMaDPipeline(**UpperCAmelCase__ )
snake_case : Optional[int] = ldmad_pipe.to(UpperCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
snake_case : Tuple = self.get_dummy_inputs(UpperCAmelCase__ )
snake_case : List[Any] = 3 * [inputs['''prompt''']]
# forward
snake_case : int = ldmad_pipe(**UpperCAmelCase__ )
snake_case , snake_case : List[str] = output.rgb, output.depth
snake_case : str = rgb_slice_a[0, -3:, -3:, -1]
snake_case : List[str] = depth_slice_a[0, -3:, -1]
snake_case : List[Any] = self.get_dummy_inputs(UpperCAmelCase__ )
snake_case : List[Any] = 3 * [inputs.pop('''prompt''' )]
snake_case : Union[str, Any] = ldmad_pipe.tokenizer(
UpperCAmelCase__ , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=UpperCAmelCase__ , return_tensors='''pt''' , )
snake_case : Optional[Any] = text_inputs['''input_ids'''].to(UpperCAmelCase__ )
snake_case : List[Any] = ldmad_pipe.text_encoder(UpperCAmelCase__ )[0]
snake_case : Any = prompt_embeds
# forward
snake_case : Union[str, Any] = ldmad_pipe(**UpperCAmelCase__ )
snake_case , snake_case : Optional[int] = output.rgb, output.depth
snake_case : Dict = rgb_slice_a[0, -3:, -3:, -1]
snake_case : Any = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def lowerCAmelCase( self : str ):
"""simple docstring"""
snake_case : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case : Optional[int] = self.get_dummy_components()
snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=UpperCAmelCase__ )
snake_case : List[str] = StableDiffusionLDMaDPipeline(**UpperCAmelCase__ )
snake_case : Dict = ldmad_pipe.to(UpperCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
snake_case : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase__ )
snake_case : Dict = '''french fries'''
snake_case : Tuple = ldmad_pipe(**UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ )
snake_case , snake_case : Tuple = output.rgb, output.depth
snake_case : Optional[int] = rgb[0, -3:, -3:, -1]
snake_case : Any = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
snake_case : List[Any] = np.array(
[0.3_7044, 0.7181_1503, 0.722_3251, 0.4860_3675, 0.563_8391, 0.636_4948, 0.4283_3704, 0.490_1315, 0.4792_6217] )
snake_case : List[str] = np.array([107.8_4738, 84.6_2802, 89.96_2135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def lowerCAmelCase( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        """simple docstring"""
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase( self : str ):
"""simple docstring"""
snake_case : List[str] = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
snake_case : Dict = ldmad_pipe.to(UpperCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
snake_case : Optional[int] = self.get_inputs(UpperCAmelCase__ )
snake_case : Any = ldmad_pipe(**UpperCAmelCase__ )
snake_case , snake_case : Any = output.rgb, output.depth
snake_case : List[Any] = rgb[0, -3:, -3:, -1].flatten()
snake_case : Tuple = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
snake_case : List[str] = np.array(
[0.5380_5465, 0.5670_7305, 0.548_6515, 0.5701_2236, 0.581_4511, 0.5625_3487, 0.5484_3014, 0.5509_2263, 0.645_9706] )
snake_case : List[str] = np.array(
[0.926_3781, 0.667_8672, 0.548_6515, 0.9220_2145, 0.6783_1135, 0.5625_3487, 0.924_1694, 0.755_1478, 0.645_9706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class a_ ( unittest.TestCase ):
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        """simple docstring"""
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 50,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase( self : int ):
"""simple docstring"""
snake_case : Tuple = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(UpperCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
snake_case : Optional[int] = self.get_inputs(UpperCAmelCase__ )
snake_case : Optional[Any] = ldmad_pipe(**UpperCAmelCase__ )
snake_case , snake_case : Optional[int] = output.rgb, output.depth
snake_case : Any = 0.49_5586
snake_case : Optional[Any] = 0.3379_5515
snake_case : List[str] = 112.4_8518
snake_case : List[Any] = 98.48_9746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
snake_case : Dict = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(UpperCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
snake_case : Dict = self.get_inputs(UpperCAmelCase__ )
snake_case : Optional[Any] = ldmad_pipe(**UpperCAmelCase__ )
snake_case , snake_case : str = output.rgb, output.depth
snake_case : Union[str, Any] = 0.419_4127
snake_case : Union[str, Any] = 0.3537_5586
snake_case : Tuple = 0.563_8502
snake_case : Any = 0.3468_6103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 598 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def max_subsequence_sum( nums: Sequence[int] | None = None ) -> int:
    if nums is None or not nums:
        raise ValueError("""Input sequence should not be empty""" )
    ans = nums[0]
    for i in range(1 , len(nums ) ):
        num = nums[i]
        ans = max(ans , ans + num , num )
    return ans
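# Worked example (added for clarity): subsequences need not be contiguous, so
# the best choice keeps every positive element: 1 + 4 + 2 + 1 + 4 == 12.
assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12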
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array)) | 128 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline ( Pipeline ):
"""simple docstring"""
    def _sanitize_parameters( self , truncation=None , tokenize_kwargs=None , return_tensors=None , **kwargs ):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" )
            tokenize_kwargs["""truncation"""] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["""return_tensors"""] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess( self , inputs , **tokenize_kwargs ) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , return_tensors=False ):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
from sklearn.metrics import f1_score
import datasets
lowerCAmelCase = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
lowerCAmelCase = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
lowerCAmelCase = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1 ( datasets.Metric ):
    def _info (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
    def _compute (self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ):
        score = f1_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 230 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook( method ):
    '''simple docstring'''
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse('0.17.0' ):
        return method
    def wrapper(self , *args , **kwargs ):
        if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )
    return wrapper
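# Brief usage sketch (illustrative, with a dummy class): the decorator is a
# no-op unless a new-enough accelerate is installed, in which case it fires
# the model's accelerate hook before the wrapped method runs.
class _DemoModel:
    @apply_forward_hook
    def encode(self, x):
        return x * 2

assert _DemoModel().encode(3) == 6  # works with or without accelerate hooks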
| 230 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
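# `get_duration` is imported from a local utils module that is not shown
# here. As an illustration only, a decorator consistent with how it is used
# below could look like `_example_get_duration` (hypothetical name, not the
# real helper):
import functools
import time

def _example_get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # the benchmark records this duration
    return wrapper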
SPEED_TEST_N_EXAMPLES = 5_0000
SMALL_TEST = 5000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def read( dataset: datasets.Dataset ,length ):
    for i in range(length ):
        _ = dataset[i]
@get_duration
def read_batch( dataset: datasets.Dataset ,length ,batch_size ):
    for i in range(0 ,length ,batch_size ):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted( dataset: datasets.Dataset ,length ,type ):
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]
@get_duration
def read_formatted_batch( dataset: datasets.Dataset ,length ,batch_size ,type ):
    with dataset.formatted_as(type=type ):
        for i in range(0 ,length ,batch_size ):
            _ = dataset[i : i + batch_size]
def benchmark_iterating( ):
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
    functions_shuffled = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
        features = datasets.Features(
            {"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir ,"""dataset.arrow""" ) ,features ,num_examples=SPEED_TEST_N_EXAMPLES ,seq_shapes={"""list""": (100,)} ,)
print("""first set of iterations""" )
        for func, kwargs in functions:
            print(func.__name__ ,str(kwargs ) )
            times[func.__name__ + """ """ + """ """.join(str(v ) for v in kwargs.values() )] = func(dataset ,**kwargs )
print("""shuffling dataset""" )
        dataset = dataset.shuffle()
print("""Second set of iterations (after shuffling""" )
        for func, kwargs in functions_shuffled:
            print("""shuffled """ ,func.__name__ ,str(kwargs ) )
            times["""shuffled """ + func.__name__ + """ """ + """ """.join(str(v ) for v in kwargs.values() )] = func(
                dataset ,**kwargs )
    with open(RESULTS_FILE_PATH ,"""wb""" ) as f:
        f.write(json.dumps(times ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 506 |
'''simple docstring'''
def optimal_merge_pattern( files : list ) -> float:
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost
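# Worked example (added for clarity): merging [2, 3, 4] greedily first joins
# 2 + 3 at cost 5, then 5 + 4 at cost 9, for a total cost of 5 + 9 == 14.
assert optimal_merge_pattern([2, 3, 4]) == 14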
if __name__ == "__main__":
import doctest
doctest.testmod()
| 506 | 1 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _lowercase :
    def __init__( self , parent , batch_size=2 , image_size=3_2 , patch_size=1_6 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , backbone_featmap_shape=[1, 3_8_4, 2_4, 2_4] , is_hybrid=True , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
        backbone_config = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [9_6, 1_9_2, 3_8_4, 7_6_8],
'''num_groups''': 2,
}
return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_depth_estimation( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( __lowerCamelCase,__lowerCamelCase,unittest.TestCase ):
_lowercase : List[str] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
_lowercase : Dict = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowercase : Any = False
_lowercase : Dict = False
_lowercase : Dict = False
def UpperCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
pass
def UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(lowerCamelCase__ )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_depth_estimation( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training( self ):
        """simple docstring"""
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        """simple docstring"""
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_initialization( self ):
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [F"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small( self ):
        """simple docstring"""
        pass
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_raise_readout_type( self ):
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = '''add'''
        with self.assertRaises(ValueError ):
            _ = DPTForDepthEstimation(config )
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest( unittest.TestCase ):
    def test_inference_depth_estimation( self ):
        """simple docstring"""
        image_processor = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
        model = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
            predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1e-4 ) )
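# Postprocessing note (assumption, not part of the test): in downstream use the raw
# `predicted_depth` map is usually resized back to the input image resolution before
# visualisation, e.g. with
#   torch.nn.functional.interpolate(
#       predicted_depth.unsqueeze(1), size=image.size[::-1],
#       mode="bicubic", align_corners=False,
#   )
# as suggested in the Intel/dpt-hybrid-midas model card.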
| 203 |
def alternative_string_arrange( first_str , second_str ):
    '''simple docstring'''
    first_str_length = len(first_str )
    second_str_length = len(second_str )
    # Walk as far as the longer of the two strings so every character is visited
    output_list_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(output_list_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
    print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
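# Worked example (assumption, not part of the original script): with "AB" and "XYZ"
# the loop visits indexes 0..2 and interleaves the available characters, so
# alternative_string_arrange("AB", "XYZ") returns "AXBYZ".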
| 203 | 1 |
'''simple docstring'''
def binary_and( a , b ) ->str:
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == """1""" and char_b == """1""" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
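# Worked example (assumption, not part of the original module): binary_and(25, 32)
# compares 0b011001 with 0b100000 column by column and returns "0b000000", while
# binary_and(37, 50) compares 0b100101 with 0b110010 and returns "0b100000".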
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 | '''simple docstring'''
import argparse
__snake_case = """docs/source/_static/js/custom.js"""
def update_custom_js( version ):
    with open(__snake_case , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("""const stableVersion =""" ):
        index += 1
    lines[index] = f"""const stableVersion = \"v{version}\"\n"""
    # Then update the dictionary
    while not lines[index].startswith("""const versionMapping = {""" ):
        index += 1
    # We go until the end of the dictionary
    while not lines[index].startswith("""}""" ):
        index += 1
    # We add the new version just before the closing brace
    lines[index - 1] += f"""    \"v{version}\": \"v{version}\",\n"""
    with open(__snake_case , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.writelines(lines )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--version""", help="""Release version.""")
    args = parser.parse_args()
    update_custom_js(args.version)
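# Typical invocation during a release (assumption: the file name is illustrative):
#   python utils/update_custom_js.py --version 4.30.0
# which rewrites the `const stableVersion` line and appends a "v4.30.0" entry to
# the versionMapping dictionary in docs/source/_static/js/custom.js.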
| 603 | 0 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@slow
    def test_for_image_classification( self ):
        """simple docstring"""
        image_processor = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
        model = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
        model.to(torch_device )
        from datasets import load_dataset
        dataset = load_dataset("""nielsr/rvlcdip-demo""" )
        image = dataset["""train"""][0]["""image"""].convert("""RGB""" )
        inputs = image_processor(image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) ) | 6 | import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(R'''^\s*else:''')
def find_backend( lowercase ):
    if _re_test_backend.search(lowercase ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(lowercase )]
    backends.sort()
    return "_and_".join(backends )
def parse_init( lowercase ):
    with open(lowercase, 'r', encoding='utf-8', newline='\n' ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith('_import_structure = {' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(R'\[([^\]]+)\]', content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(' ' * 8 + '"' ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(' ' * 8 + '"' ):
                    objects.append(line[9:-3] )
                elif line.startswith(' ' * 12 + '"' ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('else' )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', ' ) )
        elif line.startswith(' ' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects , type_hint_objects ):
    def find_duplicates(seq ):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = 'base imports' if key == 'none' else F'{key} backend'
            errors.append(F'Differences for {name}:' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'  {a} in TYPE_HINT but not in _import_structure.' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'  {a} in _import_structure but not in TYPE_HINT.' )
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root, '__init__.py' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append('\n'.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('\n\n'.join(failures ) )
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('*.py' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep, '.' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace('.py', '' ).replace(os.path.sep, '.' )
            if len(submodule.split('.' ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
    '''convert_pytorch_checkpoint_to_tf2''',
    '''modeling_flax_pytorch_utils''',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        'transformers', os.path.join(PATH_TO_TRANSFORMERS, '__init__.py' ), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = '\n'.join(F'- {module}' for module in module_not_registered )
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F'{list_of_modules}\n'
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
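# Typical CI usage (assumption, not part of the original file): run
#   python utils/check_inits.py
# from the repository root; it raises a ValueError listing every __init__.py whose
# `_import_structure` keys and TYPE_CHECKING imports disagree, or any submodule
# missing from the main init.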
| 305 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel('Iterations' )
        plt.ylabel('Function values' )
        plt.show()
    return best_state
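# Acceptance sketch (assumption, not in the original): a worsening move with
# change = -2 at current_temp = 50 is still accepted with probability
# e ** (-2 / 50) ~ 0.96, while the same move at current_temp = 1 only survives
# with probability e ** (-2) ~ 0.135; cooling therefore makes the search greedier.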
if __name__ == "__main__":
    def test_f1(x , y ):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F"""and 50 > y > - 5 found via simulated annealing: {local_max.score()}"""
    )

    def test_f2(x , y ):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F"""{local_min.score()}"""
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F"""{local_max.score()}"""
    )
| 703 |
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed , 0 )
    while i < number_of_cells:
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now: list , car_index: int ) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells ) ):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now , -1 )
def update(highway_now: list , probability: float , max_speed: int ) -> list:
    number_of_cells = len(highway_now )
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Number of empty cells before the next car
            dn = get_distance(highway_now , car_index ) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index] , dn )
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway
def simulate(highway: list , number_of_update: int , probability: float , max_speed: int ) -> list:
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        next_speeds_calculated = update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
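# Minimal usage sketch (assumption, not part of the original module): build a
# 100-cell highway with a car every 5 cells at speed 3, then run 20 update steps
# with a 10% chance of random slowdown per car per step:
#   history = simulate(construct_highway(100, 5, 3), 20, 0.1, 5)
#   print(len(history))  # 21 snapshots: the initial state plus 20 updates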
| 669 | 0 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
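# Example launch commands (assumption: this file is saved as cross_validation.py):
#   accelerate launch cross_validation.py --num_folds 5
#   python cross_validation.py --cpu                      # single-process CPU debugging
#   accelerate launch --mixed_precision fp16 cross_validation.py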
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders( accelerator , dataset , train_idxs , valid_idxs , batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs ),
            "validation": dataset["train"].select(valid_idxs ),
            "test": dataset["validation"],
        } )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    test_dataloader = DataLoader(
        tokenized_datasets["test"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function( config , args ):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue" , "mrpc" )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    metric = evaluate.load("glue" , "mrpc" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows ) , datasets["train"]["label"] )
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader , eval_dataloader , test_dataloader = get_fold_dataloaders(
            accelerator , datasets , train_idxs , valid_idxs , batch_size , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions , references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"epoch {epoch}:" , eval_metric )
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions , references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions , dim=0 ) )
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references , dim=0 )
    preds = torch.stack(test_predictions , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds , references=test_references )
    accelerator.print("Average test metrics from all folds:" , test_metric )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    # New Code #
    parser.add_argument("--num_folds" , type=int , default=3 , help="The number of splits to perform across the dataset" )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 408 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url( repo_id , path , revision = None ) ->str:
    if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type="dataset" , revision=revision )
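# Usage sketch (assumption, not part of the original module):
#   hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet")
# resolves to a URL of the form
#   https://huggingface.co/datasets/squad/resolve/main/plain_text/train-00000-of-00001.parquet
# since a None revision falls back to the repository's default branch.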
| 408 | 1 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'vocab_file': 'vocab.txt'}
lowercase_ = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
lowercase_ = {
'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def load_vocab_file( A__ ):
    """simple docstring"""
    with open(A__ , 'r' ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["""input_ids""", """attention_mask"""]
def __init__( self , lowerCAmelCase , lowerCAmelCase="<unk>" , lowerCAmelCase="<cls>" , lowerCAmelCase="<pad>" , lowerCAmelCase="<mask>" , lowerCAmelCase="<eos>" , **lowerCAmelCase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase )
_lowercase =load_vocab_file(lowerCAmelCase )
_lowercase =dict(enumerate(self.all_tokens ) )
_lowercase ={tok: ind for ind, tok in enumerate(self.all_tokens )}
_lowercase =unk_token
_lowercase =cls_token
_lowercase =pad_token
_lowercase =mask_token
_lowercase =eos_token
_lowercase =self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def A__ ( self , lowerCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(lowerCAmelCase , self.unk_token )
def A__ ( self , lowerCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , lowerCAmelCase , **lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return text.split()
def A__ ( self , lowerCAmelCase=False ) -> Union[str, Any]:
'''simple docstring'''
return len(self._id_to_token )
def A__ ( self ) -> Tuple:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def A__ ( self , lowerCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , lowerCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(lowerCAmelCase , self.unk_token )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> List[int]:
'''simple docstring'''
_lowercase =[self.cls_token_id]
_lowercase =[self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
_lowercase =[1] + ([0] * len(lowerCAmelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase ) + [1]
return mask
def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> Any:
'''simple docstring'''
_lowercase =os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' )
with open(lowerCAmelCase , 'w' ) as f:
f.write('\n'.join(self.all_tokens ) )
return (vocab_file,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = False ) -> int:
'''simple docstring'''
return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase )
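# Sketch of the special-token layout this tokenizer builds (derived from
# build_inputs_with_special_tokens above; the letters stand in for residue tokens):
#   single sequence: <cls> A B C <eos>
#   sequence pair:   <cls> A B C <eos> D E F <eos>   (ESM has no separator token)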
| 380 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_a = OpenAIGPTTokenizer
_a = OpenAIGPTTokenizerFast
_a = True
_a = False
def A__ ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowercase =[
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
_lowercase =dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
_lowercase =['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
_lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(lowerCAmelCase ) )
def A__ ( self , lowerCAmelCase ) -> str:
'''simple docstring'''
return "lower newer", "lower newer"
def A__ ( self ) -> Tuple:
'''simple docstring'''
_lowercase =OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
_lowercase ='lower'
_lowercase =['low', 'er</w>']
_lowercase =tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
_lowercase =tokens + ['<unk>']
_lowercase =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )
def A__ ( self , lowerCAmelCase=15 ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
# Simple input
_lowercase ='This is a simple input'
_lowercase =['This is a simple input 1', 'This is a simple input 2']
_lowercase =('This is a simple input', 'This is a pair')
_lowercase =[
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(lowerCAmelCase , tokenizer_r.encode , lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(lowerCAmelCase , tokenizer_r.encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(
lowerCAmelCase , tokenizer_r.batch_encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' , )
# Pair input
self.assertRaises(lowerCAmelCase , tokenizer_r.encode , lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(lowerCAmelCase , tokenizer_r.encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(
lowerCAmelCase , tokenizer_r.batch_encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' , )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
pass
| 380 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : List[str] = KandinskyImgaImgPipeline
_UpperCamelCase : List[str] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
_UpperCamelCase : Dict = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
_UpperCamelCase : Optional[int] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCamelCase : Tuple = False
@property
def __a ( self ):
return 3_2
@property
def __a ( self ):
return 3_2
@property
def __a ( self ):
return self.time_input_dim
@property
def __a ( self ):
return self.time_input_dim * 4
@property
def __a ( self ):
return 1_0_0
@property
def __a ( self ):
_lowercase : int = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Tuple = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_lowercase : Any = MultilingualCLIP(_lowerCAmelCase )
_lowercase : List[str] = text_encoder.eval()
return text_encoder
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Tuple = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowercase : Tuple = UNetaDConditionModel(**_lowerCAmelCase )
return model
@property
def __a ( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __a ( self ):
_lowercase : Dict = self.dummy_text_encoder
_lowercase : Dict = self.dummy_tokenizer
_lowercase : Any = self.dummy_unet
_lowercase : Optional[int] = self.dummy_movq
_lowercase : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.0_00_85,
'beta_end': 0.0_12,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_lowercase : int = DDIMScheduler(**_lowerCAmelCase )
_lowercase : Tuple = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
_lowercase : str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
_lowercase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCAmelCase )
# create init_image
_lowercase : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
_lowercase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowercase : Union[str, Any] = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
if str(_lowerCAmelCase ).startswith('mps' ):
_lowercase : str = torch.manual_seed(_lowerCAmelCase )
else:
_lowercase : Optional[int] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowercase : Optional[int] = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def __a ( self ):
_lowercase : List[Any] = 'cpu'
_lowercase : Union[str, Any] = self.get_dummy_components()
_lowercase : str = self.pipeline_class(**_lowerCAmelCase )
_lowercase : List[str] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Dict = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
_lowercase : Tuple = output.images
_lowercase : Union[str, Any] = pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
_lowercase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_lowercase : Union[str, Any] = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ):
_lowercase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
_lowercase : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowercase : Optional[int] = 'A red cartoon frog, 4k'
_lowercase : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
_lowercase : Tuple = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
_lowercase : Tuple = pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
_lowercase , _lowercase : Any = pipe_prior(
_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowercase : Any = pipeline(
_lowerCAmelCase , image=_lowerCAmelCase , image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='np' , )
_lowercase : Union[str, Any] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 66 |
from pathlib import Path
import fire
def minify( src_path , dest_path , n ) -> None:
    """simple docstring"""
    src_dir = Path(src_path )
    dest_dir = Path(dest_path )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest = dest_dir.joinpath(path.name )
        print(dest )
        dest.open('w' ).write('\n'.join(new ) )
if __name__ == "__main__":
    fire.Fire(minify)
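# Example invocation via python-fire (assumption: the directory names are illustrative):
#   python minify.py docs_full/ docs_mini/ 100
# which copies every file from docs_full/ into docs_mini/, truncated to its first
# 100 lines.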
| 280 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""ConvNextFeatureExtractor"""]
__UpperCAmelCase = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_convnext"""] = [
        """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ConvNextForImageClassification""",
        """ConvNextModel""",
        """ConvNextPreTrainedModel""",
        """ConvNextBackbone""",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_convnext"""] = [
        """TFConvNextForImageClassification""",
        """TFConvNextModel""",
        """TFConvNextPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 721 |
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCAmelCase_ ( __A : Tuple ):
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ):
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Union[str, Any] = 'mock-s3-bucket'
snake_case: int = f"""s3://{mock_bucket}"""
snake_case: Any = extract_path_from_uri(__A )
assert dataset_path.startswith('s3://' ) is False
snake_case: Union[str, Any] = './local/path'
snake_case: Union[str, Any] = extract_path_from_uri(__A )
assert dataset_path == new_dataset_path
def lowerCAmelCase_ ( __A : Any ):
'''simple docstring'''
snake_case: List[str] = is_remote_filesystem(__A )
assert is_remote is True
snake_case: int = fsspec.filesystem('file' )
snake_case: int = is_remote_filesystem(__A )
assert is_remote is False
@pytest.mark.parametrize('compression_fs_class' , __A )
def lowerCAmelCase_ ( __A : Optional[int] , __A : int , __A : str , __A : Optional[Any] , __A : List[str] , __A : Optional[Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: Optional[Any] = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file}
snake_case: Optional[int] = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case: str = f"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__A )
snake_case: List[str] = fsspec.filesystem(compression_fs_class.protocol , fo=__A )
assert isinstance(__A , __A )
snake_case: Any = os.path.basename(__A )
snake_case: int = expected_filename[: expected_filename.rindex('.' )]
assert fs.glob('*' ) == [expected_filename]
with fs.open(__A , 'r' , encoding='utf-8' ) as f, open(__A , encoding='utf-8' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def lowerCAmelCase_ ( __A : Any , __A : int , __A : int ):
'''simple docstring'''
snake_case: List[str] = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path}
snake_case: str = compressed_file_paths[protocol]
snake_case: Dict = 'dataset.jsonl'
snake_case: Optional[Any] = f"""{protocol}://{member_file_path}::{compressed_file_path}"""
snake_case , *snake_case: List[Any] = fsspec.get_fs_token_paths(__A )
assert fs.isfile(__A )
assert not fs.isfile('non_existing_' + member_file_path )
@pytest.mark.integration
def lowerCAmelCase_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : List[Any] ):
'''simple docstring'''
snake_case: Tuple = hf_api.dataset_info(__A , token=__A )
snake_case: List[str] = HfFileSystem(repo_info=__A , token=__A )
assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
assert hffs.isdir('data' )
assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
with open(__A ) as f:
assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: Union[str, Any] = 'bz2'
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__A , __A , clobber=__A )
with pytest.warns(__A ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__A ) == 1
assert (
str(warning_info[0].message )
== f"""A filesystem protocol was already set for {protocol} and will be overwritten."""
) | 692 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
        '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XGLMForCausalLM''',
        '''XGLMModel''',
        '''XGLMPreTrainedModel''',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
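# With the lazy module installed above, a plain import stays cheap and the heavy
# backend submodules only load on first attribute access, e.g. (illustrative):
#
#     from transformers import XGLMConfig   # does not import torch/tf/flax yet
#     from transformers import XGLMModel    # triggers the torch submodule load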
| 305 | from collections.abc import Callable
class Heap:
    '''simple docstring'''

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def test_heap() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
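    # Usage sketch (illustrative): the default key keeps the largest value on top.
    h = Heap()
    h.insert_item(5, 34)
    h.insert_item(6, 31)
    h.insert_item(7, 37)
    assert h.get_top() == [7, 37]
    assert h.extract_top() == [7, 37]
    assert h.extract_top() == [5, 34]
    assert h.extract_top() == [6, 31]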
| 305 | 1 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    """simple docstring"""
    print("""Making key files...""" )
    make_key_files("""rsa""", 1_024 )
    print("""Key files generation successful.""" )
def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    """simple docstring"""
    print("""Generating prime p...""" )
    p = rabinMiller.generate_large_prime(key_size)
    print("""Generating prime q...""" )
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""" )
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size) )
        if cryptoMath.gcd(e, (p - 1) * (q - 1) ) == 1:
            break

    print("""Calculating d that is mod inverse of e...""" )
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1) )

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str, key_size: int) -> None:
    """simple docstring"""
    if os.path.exists(f"{name}_pubkey.txt" ) or os.path.exists(f"{name}_privkey.txt" ):
        print("""\nWARNING:""" )
        print(
            f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            """Use a different name or delete these files and re-run this program.""" )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt..." )
    with open(f"{name}_pubkey.txt", """w""" ) as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}" )
    print(f"Writing private key to file {name}_privkey.txt..." )
    with open(f"{name}_privkey.txt", """w""" ) as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}" )
if __name__ == "__main__":
main()
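    # Illustrative round trip (not in the original script): with public key (n, e)
    # and private key (n, d), a message m < n encrypts as pow(m, e, n) and
    # decrypts back as pow(c, d, n). The small key size is only for the demo.
    (n, e), (_, d) = generate_key(64)
    message = 4_242
    assert pow(pow(message, e, n), d, n) == message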
| 706 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__snake_case : str = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    '''simple docstring'''
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self) -> None:
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self) -> None:
        '''simple docstring'''
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        prompt = """A painting of a squirrel eating a burger """
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        generator = generator.manual_seed(0 )
        new_image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images

        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self) -> None:
        '''simple docstring'''
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            """shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        prompt = """A painting of a squirrel eating a burger """
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 687 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
snake_case : Optional[int] = """src/transformers"""
snake_case : Dict = """docs/source/en"""
snake_case : int = """."""
def _find_text_in_file(filename: str, start_prompt: str, end_prompt: str):
    """simple docstring"""
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1

    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
snake_case : Dict = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_re_flax_models = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier: str) -> list:
    """simple docstring"""
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , identifier )
    return [m.group(0 ) for m in matches]
def _center_text(text: str, width: int) -> str:
    """simple docstring"""
    text_length = 2 if text == '''✅''' or text == '''❌''' else len(text )
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules() -> str:
    """simple docstring"""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith('Tokenizer' ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('TokenizerFast' ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]

        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name )[:-1] )

    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2

    # Build the table per se
    table = '|' + '|'.join([_center_text(c , w ) for c, w in zip(columns , widths )] ) + '|\n'
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"

    check = {True: '✅', False: '❌'}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l , w ) for l, w in zip(line , widths )] ) + "|\n"

    return table
def check_model_table(overwrite: bool = False):
    """simple docstring"""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
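    # Typical invocations (illustrative):
    #   python utils/check_table.py                      # check only; raises on mismatch
    #   python utils/check_table.py --fix_and_overwrite  # rewrite the table in docs/source/en/index.md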
check_model_table(args.fix_and_overwrite) | 545 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0 )
        # NOTE: the boolean/keyword values passed below follow the usual SD2 inpainting
        # fast-test setup; they were lost in the source and are reconstructed here.
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': init_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        '''simple docstring'''
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        '''simple docstring'''
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench.npy''' )

        model_id = '''stabilityai/stable-diffusion-2-inpainting'''
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()

        prompt = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='''np''' , )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        '''simple docstring'''
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )

        model_id = '''stabilityai/stable-diffusion-2-inpainting'''
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()

        prompt = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='''np''' , )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )

        model_id = '''stabilityai/stable-diffusion-2-inpainting'''
        pndm = PNDMScheduler.from_pretrained(model_id , subfolder='''scheduler''' )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=pndm , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()

        prompt = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        generator = torch.manual_seed(0 )
        _ = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type='''np''' , )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 75 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_plbart'''] = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_plbart'''] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 527 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    '''simple docstring'''
    return np.sqrt(np.sum((np.asarray(vector_1 ) - np.asarray(vector_2 )) ** 2 ) )


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    '''simple docstring'''
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1 , vector_2 ) ) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        '''simple docstring'''
        from timeit import timeit

        print('''Without Numpy''' )
        print(
            timeit(
                '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) )
        print('''With Numpy''' )
        print(
            timeit(
                '''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) )
benchmark()
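    # Quick sanity check (illustrative): the classic 3-4-5 right triangle.
    assert euclidean_distance((0, 0), (3, 4)) == 5.0
    assert euclidean_distance_no_np((0, 0), (3, 4)) == 5.0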
| 527 | 1 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
    return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
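    # Worked example (illustrative): an apparent power of 100 VA at power factor 0.9
    # gives real power 100 * 0.9 = 90 W and reactive power 100 * sqrt(1 - 0.81) ≈ 43.59 var.
    print(real_power(100, 0.9))      # 90.0
    print(reactive_power(100, 0.9))  # 43.5889...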
| 423 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Optional[int] = logging.get_logger(__name__)
lowercase : Optional[Any] = {
"""microsoft/swinv2-tiny-patch4-window8-256""": (
"""https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
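
    # Usage sketch (illustrative): the defaults mirror the tiny 224px variant.
    #
    #     config = Swinv2Config()
    #     config.hidden_size         # 768 == 96 * 2 ** 3
    #     config.num_hidden_layers   # 4, resolved through attribute_map -> num_layers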
| 423 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 708 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
        ('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )

            module = importlib.import_module(F'''.{module_name}''' , '''transformers.models''' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , '''__name__''' , None ) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike] , cache_dir: Optional[Union[str, os.PathLike]] = None , force_download: bool = False , resume_download: bool = False , proxies: Optional[Dict[str, str]] = None , use_auth_token: Optional[Union[bool, str]] = None , revision: Optional[str] = None , local_files_only: bool = False , **kwargs , ):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the image processor configuration file, will try to use the model config instead.''' )
        return {}

    with open(resolved_config_file , encoding='''utf-8''' ) as reader:
        return json.load(reader )
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            '''AutoImageProcessor is designed to be instantiated '''
            '''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('''config''' , None )
        trust_remote_code = kwargs.pop('''trust_remote_code''' , None )
        kwargs['''_from_auto'''] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
        image_processor_class = config_dict.get('''image_processor_type''' , None )
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
            image_processor_auto_map = config_dict['''auto_map''']['''AutoImageProcessor''']

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('''feature_extractor_type''' , None )
            if feature_extractor_class is not None:
                logger.warning(
                    '''Could not find image processor class in the image processor config or the model config. Loading'''
                    ''' based on pattern matching with the model\'s feature extractor configuration.''' )
                image_processor_class = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
            if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
                feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']
                image_processor_auto_map = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
                logger.warning(
                    '''Could not find image processor auto map in the image processor config or the model config.'''
                    ''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config , '''image_processor_type''' , None )
            if hasattr(config , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['''AutoImageProcessor''']

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class )

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop('''code_revision''' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict , **kwargs )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config ) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
            return image_processor_class.from_dict(config_dict , **kwargs )

        raise ValueError(
            F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
            F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
            F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class ) | 397 | 0 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
    @unittest.skip('Image segmentation not implemented in TF')
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline('mask-generation' , model='facebook/sam-vit-huge')
        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_21},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.99_67},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_93},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.99_09},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.98_79},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.98_34},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.97_16},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.96_12},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.95_99},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.95_52},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.95_32},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.95_16},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.94_99},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.94_83},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.94_64},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.94_08},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.93_35},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.93_26},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.92_62},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.89_99},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.89_86},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.89_84},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.88_73},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Dict = 'facebook/sam-vit-huge'
UpperCamelCase__ : List[str] = pipeline('mask-generation' , model=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256)
# Shortening by hashing
UpperCamelCase__ : Dict = []
for i, o in enumerate(outputs['masks']):
new_outupt += [{"mask": mask_to_test_readable(UpperCAmelCase_), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.02_10},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
] , )
| 596 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.00_01,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample , t)
            sample = scheduler.step(residual , t , sample , eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500]):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t , eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 , 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 , 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1 , per_sample_batch)

        residual = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 596 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_informer'''] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 715 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class snake_case :
def __init__( self :Dict , _lowerCamelCase :List[str] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = str(id_ )
__SCREAMING_SNAKE_CASE : Any = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Optional[Any] = {} # {vertex:distance}
def __lt__( self :Any , _lowerCamelCase :Any ):
return self.key < other.key
def __repr__( self :Any ):
return self.id
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , _lowerCamelCase :str ):
self.neighbors.append(_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] , _lowerCamelCase :Any , _lowerCamelCase :Tuple ):
__SCREAMING_SNAKE_CASE : int = weight
def lowerCAmelCase_ ( lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Dict ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , lowercase_ )
graph[b - 1].add_edge(graph[a - 1] , lowercase_ )
def lowerCAmelCase_ ( lowercase_ : list , lowercase_ : Vertex ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[str] = []
for u in graph:
__SCREAMING_SNAKE_CASE : Tuple = math.inf
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Optional[int] = 0
__SCREAMING_SNAKE_CASE : Dict = graph[:]
while q:
__SCREAMING_SNAKE_CASE : Tuple = min(lowercase_ )
q.remove(lowercase_ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
__SCREAMING_SNAKE_CASE : Tuple = u
__SCREAMING_SNAKE_CASE : List[str] = u.edges[v.id]
for i in range(1 , len(lowercase_ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def lowerCAmelCase_ ( lowercase_ : list , lowercase_ : Vertex ):
'''simple docstring'''
for u in graph:
__SCREAMING_SNAKE_CASE : Optional[Any] = math.inf
__SCREAMING_SNAKE_CASE : Dict = None
__SCREAMING_SNAKE_CASE : List[Any] = 0
__SCREAMING_SNAKE_CASE : Dict = list(lowercase_ )
hq.heapify(lowercase_ )
while h:
__SCREAMING_SNAKE_CASE : int = hq.heappop(lowercase_ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
__SCREAMING_SNAKE_CASE : Union[str, Any] = u
__SCREAMING_SNAKE_CASE : int = u.edges[v.id]
hq.heapify(lowercase_ )
for i in range(1 , len(lowercase_ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
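# Doctest-style usage example for connect/prim/prim_heap. The expected output is
# hand-checked: for this 3-vertex graph the MST keeps the edges 2-3 (weight 1)
# and 1-3 (weight 4), reported as (vertex, parent) pairs.
def test_vector() -> None:
    """
    >>> G = [Vertex(n) for n in range(3)]
    >>> connect(G, 1, 2, 5)
    >>> connect(G, 2, 3, 1)
    >>> connect(G, 1, 3, 4)
    >>> prim(G, G[0])
    [(2, 3), (3, 1)]
    >>> list(prim_heap(G, G[0]))
    [(2, 3), (3, 1)]
    """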
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 401 | 0 |
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """The heuristic here is the Manhattan distance to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns the valid successor nodes: inside the grid and not an obstacle."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_x,  # pass the goal as (x, y) to match the Node signature
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from the given node back to the start."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
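# Doctest-style usage example, hand-checked on the module-level `grid`: from
# (0, 0) to (0, 2) the first row is free, so greedy best-first walks straight
# along it, returning (row, column) positions.
def demo_short_search() -> None:
    """
    >>> GreedyBestFirst((0, 0), (0, 2)).search()
    [(0, 0), (0, 1), (0, 2)]
    """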
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = (0, 0)
_SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
_SCREAMING_SNAKE_CASE = GreedyBestFirst(init, goal)
_SCREAMING_SNAKE_CASE = greedy_bf.search()
if path:
for pos_x, pos_y in path:
_SCREAMING_SNAKE_CASE = 2
for elem in grid:
print(elem)
| 369 |
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "File where the evaluation results are saved."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of samples to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(
        default=200_000, metadata={"help": "Number of examples to train tokenizer on."}
    )
    vocab_size: Optional[int] = field(
        default=32_768, metadata={"help": "Vocabulary size of the new trained tokenizer."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for pretokenization."})


@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved model to the hub."})
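# Usage sketch (illustrative guard; the training scripts import these classes
# directly): each dataclass plugs into transformers' HfArgumentParser, which
# turns every field into a CLI flag with the metadata help text.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(TrainingArguments)
    training_args = parser.parse_args_into_dataclasses()[0]
    print(training_args.learning_rate)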
| 472 | 0 |
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
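# Standalone usage sketch: since ViTImageProcessor is reused for EfficientFormer,
# it can be exercised directly; the size and normalization values mirror the
# tester defaults above.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    from transformers import ViTImageProcessor

    processor = ViTImageProcessor(
        size={"height": 18, "width": 18}, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]
    )
    image = Image.fromarray(np.zeros((30, 30, 3), dtype=np.uint8))
    pixel_values = processor(image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])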
| 715 |
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Loads a fairseq checkpoint and normalizes its state dict to the HF layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` keeps the QKV weight separated as K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Loads the fairseq weights into a HF OPTModel and saves the converted model."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
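# Example invocation (the script name follows the usual transformers
# conversion-script convention and the paths are placeholders):
#
#   python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
#       --fairseq_path ./restored.pt \
#       --pytorch_dump_folder_path ./opt-hf \
#       --hf_config facebook/opt-350m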
| 315 | 0 |