import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
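

# A minimal end-to-end sketch of the pipeline exercised above, kept under a
# __main__ guard so importing this test module stays side-effect free. It
# assumes the public "openai/shap-e" checkpoint and a CUDA device; the gif
# filename is illustrative.
if __name__ == "__main__":
    from diffusers.utils import export_to_gif

    pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(0)
    frames = pipe(
        "a shark",
        generator=generator,
        guidance_scale=15.0,
        num_inference_steps=64,
        frame_size=64,
        output_type="pil",
    ).images[0]
    export_to_gif(frames, "shark_3d.gif")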
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`].
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''


@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"

        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
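

# A small sketch of how the composite config is assembled, mirroring
# `from_question_encoder_generator_configs`; the DPR and BART checkpoints named
# here are the ones used by the public RAG models, everything else is default.
if __name__ == "__main__":
    from transformers import AutoConfig

    question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    generator_config = AutoConfig.from_pretrained("facebook/bart-large")
    rag_config = RagConfig.from_question_encoder_generator_configs(
        question_encoder_config, generator_config, n_docs=5, index_name="compressed"
    )
    print(rag_config.to_dict()["n_docs"])  # 5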
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
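
# A short sketch of the derived sizes, using only defaults plus a couple of
# explicit values: with input_size=1, the 7-element default lags_sequence and
# num_time_features=2, `_number_of_features` is 0 + 0 + 2 + 0 + 2 = 4, so
# feature_size = 1 * 7 + 4 = 11.
if __name__ == "__main__":
    config = TimeSeriesTransformerConfig(prediction_length=24, num_time_features=2)
    print(config.feature_size)  # 11
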
from manim import *


class CheckpointOffloadScene(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)
            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_1, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_1))

        step_2 = MarkupText(
            f"Then, the checkpoint is removed from memory\nthrough garbage collection.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2, run_time=3))

        self.play(
            FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),
        )

        self.wait()
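

# Rendering this scene with the manim community-edition CLI looks roughly like
# the following; the module filename is an assumption, and `-pql` previews the
# video at low quality:
#
#   manim -pql checkpoint_offload.py CheckpointOffloadScene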
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
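

# A minimal sketch of building the config and inspecting it; all values here
# are the defaults defined above, so no checkpoint is downloaded.
if __name__ == "__main__":
    configuration = VisualBertConfig()
    print(configuration.hidden_size, configuration.visual_embedding_dim)  # 768 512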
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation


logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it absorbs the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
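

# A short sketch of the special-token layout implemented above, assuming the
# public facebook/blenderbot-3B checkpoint: Blenderbot appends a single </s>
# and assigns token type id 0 everywhere.
if __name__ == "__main__":
    tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
    encoded = tokenizer(" Hello world")
    # build_inputs_with_special_tokens only appends the eos token
    print(encoded["input_ids"][-1] == tokenizer.eos_token_id)  # True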
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
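
# A typical invocation of this script, as a sketch; the script filename, the
# tokenizer repo and the GCS paths are placeholders, not values the script
# ships with:
#
#   python run_mlm.py \
#       --pretrained_model_config roberta-base \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train/ \
#       --eval_dataset gs://my-bucket/eval/ \
#       --tpu_name local \
#       --bfloat16 \
#       --output_dir ./mlm-checkpoints
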
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images with random pixel values."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
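

# A minimal usage sketch of the processor under test; the checkpoint name is
# the public Chinese-CLIP release, and the image is synthetic.
if __name__ == "__main__":
    from transformers import ChineseCLIPProcessor

    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
    inputs = processor(text="T-shirt的价格是15便士。", images=image, return_tensors="np")
    print(sorted(inputs.keys()))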
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Dict,__A : Tuple,__A : str=7,__A : Dict=3,__A : Any=1_8,__A : List[str]=3_0,__A : str=4_0_0,__A : Union[str, Any]=True,__A : Optional[int]=None,__A : List[Any]=True,__A : Optional[int]=None,__A : Dict=True,__A : str=[0.5, 0.5, 0.5],__A : Dict=[0.5, 0.5, 0.5],__A : Any=False,):
_lowerCamelCase : List[str] = size if size is not None else {"height": 2_0, "width": 2_0}
_lowerCamelCase : Union[str, Any] = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
_lowerCamelCase : str = parent
_lowerCamelCase : int = batch_size
_lowerCamelCase : Optional[int] = num_channels
_lowerCamelCase : str = image_size
_lowerCamelCase : Tuple = min_resolution
_lowerCamelCase : List[str] = max_resolution
_lowerCamelCase : Tuple = do_resize
_lowerCamelCase : Union[str, Any] = size
_lowerCamelCase : Any = do_center_crop
_lowerCamelCase : str = crop_size
_lowerCamelCase : Dict = do_normalize
_lowerCamelCase : List[str] = image_mean
_lowerCamelCase : Dict = image_std
_lowerCamelCase : Dict = do_reduce_labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
_lowerCamelCase : Optional[Any] = Image.open(dataset[0]["file"] )
_lowerCamelCase : Any = Image.open(dataset[1]["file"] )
return image, map
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
_lowerCamelCase : Dict = Image.open(ds[0]["file"] )
_lowerCamelCase : Dict = Image.open(ds[1]["file"] )
_lowerCamelCase : Optional[Any] = Image.open(ds[2]["file"] )
_lowerCamelCase : List[str] = Image.open(ds[3]["file"] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = BeitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Tuple = BeitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A,"do_resize" ) )
self.assertTrue(hasattr(__A,"size" ) )
self.assertTrue(hasattr(__A,"do_center_crop" ) )
self.assertTrue(hasattr(__A,"center_crop" ) )
self.assertTrue(hasattr(__A,"do_normalize" ) )
self.assertTrue(hasattr(__A,"image_mean" ) )
self.assertTrue(hasattr(__A,"image_std" ) )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{"height": 2_0, "width": 2_0} )
self.assertEqual(image_processor.crop_size,{"height": 1_8, "width": 1_8} )
self.assertEqual(image_processor.do_reduce_labels,__A )
_lowerCamelCase : str = self.image_processing_class.from_dict(
self.image_processor_dict,size=4_2,crop_size=8_4,reduce_labels=__A )
self.assertEqual(image_processor.size,{"height": 4_2, "width": 4_2} )
self.assertEqual(image_processor.crop_size,{"height": 8_4, "width": 8_4} )
self.assertEqual(image_processor.do_reduce_labels,__A )
def lowerCamelCase_ ( self : int ):
pass
def lowerCamelCase_ ( self : List[Any] ):
# Initialize image_processing
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A,Image.Image )
# Test not batched input
_lowerCamelCase : Optional[int] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
_lowerCamelCase : Optional[int] = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
def lowerCamelCase_ ( self : Optional[Any] ):
# Initialize image_processing
_lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A,np.ndarray )
# Test not batched input
_lowerCamelCase : Tuple = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
_lowerCamelCase : Optional[int] = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
def lowerCamelCase_ ( self : int ):
# Initialize image_processing
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A,torch.Tensor )
# Test not batched input
_lowerCamelCase : Optional[Any] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
_lowerCamelCase : Union[str, Any] = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
def lowerCamelCase_ ( self : Dict ):
# Initialize image_processing
_lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : int = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A,torchify=__A )
_lowerCamelCase : Dict = []
for image in image_inputs:
self.assertIsInstance(__A,torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
_lowerCamelCase : Optional[Any] = image_processing(image_inputs[0],maps[0],return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
self.assertEqual(
encoding["labels"].shape,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
self.assertEqual(encoding["labels"].dtype,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_5_5 )
# Test batched
_lowerCamelCase : Tuple = image_processing(__A,__A,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
self.assertEqual(
encoding["labels"].shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
self.assertEqual(encoding["labels"].dtype,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_5_5 )
# Test not batched input (PIL images)
_lowerCamelCase , _lowerCamelCase : int = prepare_semantic_single_inputs()
_lowerCamelCase : Union[str, Any] = image_processing(__A,__A,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
self.assertEqual(
encoding["labels"].shape,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
self.assertEqual(encoding["labels"].dtype,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_5_5 )
        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # Once labels are reduced, the background class is mapped to the ignore index 255
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255) | 44 |
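# Illustrative sketch of the "reduce labels" step asserted above (assumed helper,
# mirroring the usual segmentation-map convention: background 0 becomes the
# ignore index 255 and every other class id shifts down by one):
#
# import numpy as np
#
# def reduce_label(label: np.ndarray) -> np.ndarray:
#     label = label.astype(np.int64)
#     label[label == 0] = 255    # background -> ignore index
#     label = label - 1          # shift remaining class ids down by one
#     label[label == 254] = 255  # keep the ignore index at 255 after the shift
#     return label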
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"

    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name) | 691 | 0 |
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
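
# Quick sanity check (illustrative): a generator expression computes the same digit sum.
# assert solution() == str(sum(i**i for i in range(1, 1001)))[-10:]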
if __name__ == "__main__":
print(solution()) | 45 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 691 | 0 |
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort: repeatedly merge runs of doubling width p."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
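
# Quick self-check (illustrative usage):
# assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]
# assert iter_merge_sort([1]) == [1]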
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted)) | 46 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that returns the wall-clock duration of `func` instead of its result."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
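
# Illustrative usage (the feature spec below is an assumption, not from the benchmark):
# features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
# dummy = generate_examples(features, num_examples=2)  # -> [(0, {...}), (1, {...})]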
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
return dataset | 691 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Convert a PyTorch BertModel state dict into a TF1-style checkpoint on disk."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
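
# Example invocation (script name and paths are placeholders):
# python convert_bert_to_tf.py \
#     --model_name bert-base-uncased \
#     --pytorch_model_path ./pytorch_model.bin \
#     --tf_cache_dir ./tf_ckpt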
if __name__ == "__main__":
main()
| 47 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
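
# Sketch of the patience-based early exit implemented in the forward pass above
# (illustrative pseudo-helper, not part of the model): stop as soon as `patience`
# consecutive internal classifiers agree with the previous layer's prediction.
#
# def should_exit(predictions_so_far, patience):
#     streak = 0
#     for prev, cur in zip(predictions_so_far, predictions_so_far[1:]):
#         streak = streak + 1 if cur == prev else 0
#         if streak == patience:
#             return True
#     return False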
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs | 691 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to `num` via the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
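
# Illustrative check:
# prime_sieve(25) -> [2, 3, 5, 7, 11, 13, 17, 19, 23]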
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 48 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 691 | 0 |
"""simple docstring"""
class _UpperCAmelCase :
def __init__( self : Any , _lowercase : int , _lowercase : Optional[Any]=None , _lowercase : Optional[Any]=None ):
__UpperCAmelCase = data
__UpperCAmelCase = previous
__UpperCAmelCase = next_node
def __str__( self : Optional[int] ):
return F'''{self.data}'''
def a ( self : Tuple ):
return self.data
def a ( self : Any ):
return self.next
def a ( self : Any ):
return self.previous
class _UpperCAmelCase :
def __init__( self : int , _lowercase : List[Any] ):
__UpperCAmelCase = head
def __iter__( self : str ):
return self
def a ( self : Optional[int] ):
if not self.current:
raise StopIteration
else:
__UpperCAmelCase = self.current.get_data()
__UpperCAmelCase = self.current.get_next()
return value
class _UpperCAmelCase :
def __init__( self : int ):
__UpperCAmelCase = None # First node in list
__UpperCAmelCase = None # Last node in list
def __str__( self : List[str] ):
__UpperCAmelCase = self.head
__UpperCAmelCase = []
while current is not None:
nodes.append(current.get_data() )
__UpperCAmelCase = current.get_next()
return " ".join(str(_lowercase ) for node in nodes )
def __contains__( self : int , _lowercase : int ):
__UpperCAmelCase = self.head
while current:
if current.get_data() == value:
return True
__UpperCAmelCase = current.get_next()
return False
def __iter__( self : Union[str, Any] ):
return LinkedListIterator(self.head )
def a ( self : Optional[Any] ):
if self.head:
return self.head.get_data()
return None
def a ( self : Union[str, Any] ):
if self.tail:
return self.tail.get_data()
return None
def a ( self : Optional[int] , _lowercase : Node ):
if self.head is None:
__UpperCAmelCase = node
__UpperCAmelCase = node
else:
self.insert_before_node(self.head , _lowercase )
def a ( self : Optional[Any] , _lowercase : Node ):
if self.head is None:
self.set_head(_lowercase )
else:
self.insert_after_node(self.tail , _lowercase )
def a ( self : Optional[int] , _lowercase : int ):
__UpperCAmelCase = Node(_lowercase )
if self.head is None:
self.set_head(_lowercase )
else:
self.set_tail(_lowercase )
def a ( self : Optional[Any] , _lowercase : Node , _lowercase : Node ):
__UpperCAmelCase = node
__UpperCAmelCase = node.previous
if node.get_previous() is None:
__UpperCAmelCase = node_to_insert
else:
__UpperCAmelCase = node_to_insert
__UpperCAmelCase = node_to_insert
def a ( self : Optional[Any] , _lowercase : Node , _lowercase : Node ):
__UpperCAmelCase = node
__UpperCAmelCase = node.next
if node.get_next() is None:
__UpperCAmelCase = node_to_insert
else:
__UpperCAmelCase = node_to_insert
__UpperCAmelCase = node_to_insert
def a ( self : str , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = 1
__UpperCAmelCase = Node(_lowercase )
__UpperCAmelCase = self.head
while node:
if current_position == position:
self.insert_before_node(_lowercase , _lowercase )
return
current_position += 1
__UpperCAmelCase = node.next
self.insert_after_node(self.tail , _lowercase )
def a ( self : Optional[int] , _lowercase : int ):
__UpperCAmelCase = self.head
while node:
if node.get_data() == item:
return node
__UpperCAmelCase = node.get_next()
raise Exception('''Node not found''' )
def a ( self : Any , _lowercase : str ):
if (node := self.get_node(_lowercase )) is not None:
if node == self.head:
__UpperCAmelCase = self.head.get_next()
if node == self.tail:
__UpperCAmelCase = self.tail.get_previous()
self.remove_node_pointers(_lowercase )
@staticmethod
def a ( _lowercase : Node ):
if node.get_next():
__UpperCAmelCase = node.previous
if node.get_previous():
__UpperCAmelCase = node.next
__UpperCAmelCase = None
__UpperCAmelCase = None
def a ( self : str ):
return self.head is None
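
# Illustrative usage of the list above:
# ll = LinkedList()
# for v in (1, 2, 3):
#     ll.insert(v)
# assert str(ll) == "1 2 3"
# assert 2 in ll
# ll.delete_value(2)
# assert list(ll) == [1, 3]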
def create_linked_list() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a single random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]) | 691 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
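
# Illustrative usage (assumes an Accelerator has already been created):
# accelerator = Accelerator()
# train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)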
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 50 |
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the key worldwide COVID-19 counters from worldometers.info."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
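
# Illustrative output shape (the actual values change constantly):
# {"Coronavirus Cases:": "...", "Deaths:": "...", "Recovered:": "...", ...}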
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
print(f"""{key}\n{value}\n""") | 691 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
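
# Example invocation (script name and paths are placeholders):
# python convert_mobilebert_tf_to_pytorch.py \
#     --tf_checkpoint_path ./mobilebert/model.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./pytorch_model.bin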
if __name__ == "__main__":
a__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a__ : List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 51 |
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
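
# Quick examples:
# set_bit(0b1101, 1)    -> 0b1111 (15)
# clear_bit(0b1111, 2)  -> 0b1011 (11)
# flip_bit(0b1101, 1)   -> 0b1111 (15)
# is_bit_set(0b1010, 3) -> True
# get_bit(0b1010, 0)    -> 0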
if __name__ == "__main__":
import doctest
doctest.testmod() | 691 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
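
    # Note (illustrative): with `out_features=None` the backbone falls back to returning
    # only the last stage, which is exactly what the second half of the check above
    # asserts (a single feature map with `hidden_sizes[-1]` channels).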
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 52 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
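# The tests below build every fixture from scratch in a temp dir: a tiny DPR
# wordpiece vocab, a tiny BART BPE tokenizer, and a two-document dataset with a
# FAISS inner-product index, so RagRetriever can be exercised fully offline.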
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
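    # Note for the retrieval asserts below: doc "0" has an all-ones embedding and
    # doc "1" an all-twos embedding, so a query of np.ones(...) scores highest
    # against doc "1" under inner product, while -np.ones(...) favors doc "0".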
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))
        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
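    # The "legacy" index layout keeps the FAISS index and passage metadata in
    # separate *.index.dpr / *.index_meta.dpr files plus a pickled psgs_w100.tsv.pkl
    # passage map; the retriever above locates all of them via index_path.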
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary. | 691 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_snake_case : List[Any] = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
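# Standard transformers lazy-import pattern: the heavy torch-backed submodules
# listed below are only imported when their attributes are first accessed.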
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Union[str, Any] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
_snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 53 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
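# Full determinism (fixed seeds plus deterministic kernels) is needed because
# the fast test below compares generated pixels against a hard-coded slice.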
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
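    # All inputs above are deliberately tiny (64x64, 10 steps) so the fast test
    # runs on CPU; strength=0.2 keeps the output close to the init image.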
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        # The usual diffusers fuzzy check: a 3x3 corner slice within 1e-2 tolerance.
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
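    # Releasing cached CUDA memory between slow tests avoids OOMs when several
    # fp16 pipelines are instantiated in the same session.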
    def test_kandinsky_img2img_pipeline(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image) | 691 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
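    # The "hint" above is the ControlNet conditioning image: a 1x3x64x64 float
    # tensor in [0, 1], consumed by the UNet via its "image_hint" addition embedding.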
    def test_kandinsky_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        # convert the PIL hint image to a 1x3xHxW float tensor in [0, 1]
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 54 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
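    # The dict above mirrors the processor's serialized config; the from_dict
    # round-trip test in the suite below relies on exactly these defaults.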
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
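    # prepare_inputs can emit PIL images (default), numpy arrays (numpify=True) or
    # torch tensors (torchify=True); the test_call_* tests below cover each path.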
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images (RGBA inputs are converted down to 3 channels)
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        ) | 691 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE :Optional[Any] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :str = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55 |
def and_gate(input_1: int, input_2: int) -> int:
    # A tuple containing no zeros means both inputs were 1, i.e. logical AND.
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
assert and_gate(0 ,0) == 0
assert and_gate(0 ,1) == 0
assert and_gate(1 ,0) == 0
assert and_gate(1 ,1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 691 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a : Any = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = ["ViTFeatureExtractor"]
_a : Tuple = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
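# These are end-to-end "script" tests: they rewrite the shipped bash training
# scripts into argv lists, patch sys.argv, and run finetune/distillation
# in-process so the resulting metrics JSON can be asserted on directly.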
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()
        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        # Warm up the cache so later timings do not include download time.
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)
        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1 | 691 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : Optional[Any] = logging.get_logger(__name__)
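# A minimal BLIP-style image processor: fixed-size bicubic resize (384x384 by
# default), 1/255 rescaling, and normalization with the OpenAI CLIP mean/std.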
class BlipImageProcessor(BaseImageProcessor):
    r"""Constructs a BLIP-style image processor (resize, rescale, CLIP-stat normalize)."""

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def _a ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
UpperCamelCase_: List[str] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_: str = resample if resample is not None else self.resample
UpperCamelCase_: List[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase_: Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase_: Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_: List[str] = image_mean if image_mean is not None else self.image_mean
UpperCamelCase_: Any = image_std if image_std is not None else self.image_std
UpperCamelCase_: Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase_: int = size if size is not None else self.size
UpperCamelCase_: str = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
UpperCamelCase_: Optional[Any] = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase_: str = [convert_to_rgb(_lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase_: int = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
UpperCamelCase_: Union[str, Any] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_rescale:
UpperCamelCase_: List[str] = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
if do_normalize:
UpperCamelCase_: Optional[Any] = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images]
UpperCamelCase_: Dict = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
UpperCamelCase_: int = BatchFeature(data={'pixel_values': images} , tensor_type=_lowerCamelCase )
        return encoded_outputs
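# Illustrative usage of the processor above -- a minimal sketch, assuming a PIL
# RGB image and the standard BaseImageProcessor __call__ -> preprocess dispatch
# (shapes are for the default 384x384 size):
#
#   processor = BlipImageProcessor()
#   batch = processor(images=[pil_image], return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 384, 384) after resize/rescale/normalize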
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass ( KwargsHandler ):
    a : int = 0
    b : bool = False
    c : float = 3.0
class KwargsHandlerTester ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
        cmd = ['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
snake_case_ : Optional[Any] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
snake_case_ : List[str] = Accelerator(kwargs_handlers=[ddp_scaler])
snake_case_ : Dict = torch.nn.Linear(1_00, 2_00)
snake_case_ : List[Any] = accelerator.prepare(model)
# Check the values changed in kwargs
snake_case_ : Dict = ''''''
snake_case_ : str = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
        raise ValueError(error_msg)
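# The pattern being tested: each *Kwargs dataclass inherits KwargsHandler, whose
# to_kwargs() diffs an instance against the class defaults so that only explicitly
# set fields are forwarded -- e.g. GradScalerKwargs(init_scale=1024) yields just
# {"init_scale": 1024} for the underlying torch.cuda.amp.GradScaler.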
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    '''simple docstring'''
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""" )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
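# Worked example (illustrative values): a polarizer at 60 degrees transmits
# cos^2(60 deg) = 0.25 of the incident intensity, so malus_law(100.0, 60.0)
# returns 25.0; at 0 degrees everything passes, at 90 degrees nothing does.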
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
class Graph:
    def __init__(self):
        """simple docstring"""
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex(self, vertex):
        """simple docstring"""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge(self, head, tail, weight):
        """simple docstring"""
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight(self):
        """simple docstring"""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e: e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__(self):
        """simple docstring"""
        string = ''''''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip('''\n''' )
    def get_edges(self):
        """simple docstring"""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices(self):
        """simple docstring"""
        return self.adjacency.keys()
    @staticmethod
    def build(vertices=None, edges=None):
        """simple docstring"""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
class UnionFind:
    def __init__(self):
        """simple docstring"""
        self.parent = {}
        self.rank = {}
    def __len__(self):
        """simple docstring"""
        return len(self.parent )
    def make_set(self, item):
        """simple docstring"""
        if item in self.parent:
            return self.find(item )
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find(self, item):
        """simple docstring"""
        if item not in self.parent:
            return self.make_set(item )
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item] )
        return self.parent[item]
    def union(self, item_a, item_b):
        """simple docstring"""
        root_a = self.find(item_a )
        root_b = self.find(item_b )
        if root_a == root_b:
            return root_a
        if self.rank[root_a] > self.rank[root_b]:
            self.parent[root_b] = root_a
            return root_a
        if self.rank[root_a] < self.rank[root_b]:
            self.parent[root_a] = root_b
            return root_b
        if self.rank[root_a] == self.rank[root_b]:
            self.rank[root_a] += 1
            self.parent[root_b] = root_a
            return root_a
        return None
    @staticmethod
    def boruvka(graph):
        """simple docstring"""
        num_components = graph.num_vertices
        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head, tail, weight = edge
                set_a = union_find.find(head )
                set_b = union_find.find(tail )
                if set_a != set_b:
                    if cheap_edge[set_a] == -1 or cheap_edge[set_a][2] > weight:
                        cheap_edge[set_a] = [head, tail, weight]
                    if cheap_edge[set_b] == -1 or cheap_edge[set_b][2] > weight:
                        cheap_edge[set_b] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
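# Complexity note: each Boruvka round attaches every component to its cheapest
# outgoing edge, at least halving the number of components, so the while-loop
# runs O(log V) times for O(E log V) total work. Distinct edge weights (which
# distinct_weight() enforces) make the cheapest edge unique and prevent cycles.
#
# Minimal usage sketch (hypothetical toy graph; `boruvka` is the method name
# chosen in the reconstruction above):
#   g = Graph.build(vertices=[1, 2, 3], edges=[(1, 2, 1), (2, 3, 2), (1, 3, 3)])
#   g.distinct_weight()
#   mst = UnionFind.boruvka(g)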
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str]=7 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : int=18 , UpperCAmelCase_ : str=30 , UpperCAmelCase_ : Dict=400 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=True , ) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Any =size if size is not None else {"height": 18, "width": 18}
lowerCamelCase__: Tuple =parent
lowerCamelCase__: Dict =batch_size
lowerCamelCase__: Optional[Any] =num_channels
lowerCamelCase__: Union[str, Any] =image_size
lowerCamelCase__: Dict =min_resolution
lowerCamelCase__: int =max_resolution
lowerCamelCase__: Tuple =do_resize
lowerCamelCase__: Tuple =size
lowerCamelCase__: Dict =do_normalize
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = ImageGPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Any:
'''simple docstring'''
lowerCamelCase__: int =ImageGPTImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase_ , "clusters"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize"))
self.assertTrue(hasattr(UpperCAmelCase_ , "size"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize"))
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"height": 18, "width": 18})
lowerCamelCase__: Optional[Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {"height": 42, "width": 42})
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Any:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.image_processing_class(**self.image_processor_dict)
lowerCamelCase__: str =json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , obj[key]))
else:
self.assertEqual(obj[key] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str:
'''simple docstring'''
lowerCamelCase__: List[str] =self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__: Any =os.path.join(UpperCAmelCase_ , "image_processor.json")
image_processor_first.to_json_file(UpperCAmelCase_)
lowerCamelCase__: Tuple =self.image_processing_class.from_json_file(UpperCAmelCase_).to_dict()
lowerCamelCase__: Optional[Any] =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =self.image_processing_class.from_pretrained(UpperCAmelCase_).to_dict()
lowerCamelCase__: Tuple =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , UpperCAmelCase_)
@unittest.skip("ImageGPT requires clusters at initialization")
def SCREAMING_SNAKE_CASE_ (self : int) ->int:
'''simple docstring'''
pass
def lowerCAmelCase_ ( ) -> Dict:
"""simple docstring"""
lowerCamelCase__: Optional[int] =load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
lowerCamelCase__: Union[str, Any] =Image.open(dataset[4]["file"] )
lowerCamelCase__: int =Image.open(dataset[5]["file"] )
lowerCamelCase__: List[Any] =[imagea, imagea]
return images
@require_vision
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[str] =ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
lowerCamelCase__: List[str] =prepare_images()
# test non-batched
lowerCamelCase__: Optional[int] =image_processing(images[0] , return_tensors="pt")
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_024))
lowerCamelCase__: Optional[int] =[306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCAmelCase_)
# test batched
lowerCamelCase__: str =image_processing(UpperCAmelCase_ , return_tensors="pt")
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_024))
lowerCamelCase__: Union[str, Any] =[303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCAmelCase_)
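        # Why (batch, 1024): ImageGPT preprocessing resizes each image to 32x32 and
        # maps every pixel to its nearest color cluster, so an image becomes
        # 32 * 32 = 1024 discrete token ids (the toy config above uses only 2
        # clusters; the pretrained checkpoint ships its own palette).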
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds ,labels):
    return (preds == labels).mean()
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowerCamelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fp16 ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=num_labels ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=config ,cache_dir=model_args.cache_dir ,)
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir ,tokenizer=tokenizer ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir ,tokenizer=tokenizer ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions ,axis=1)
        return {"acc": simple_accuracy(preds ,p.label_ids)}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer ,pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model ,args=training_args ,train_dataset=train_dataset ,eval_dataset=eval_dataset ,compute_metrics=compute_metrics ,data_collator=data_collator ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir ,'''eval_results.txt''')
        if trainer.is_world_master():
            with open(output_eval_file ,'''w''') as writer:
                logger.info('''***** Eval results *****''')
                for key, value in result.items():
                    logger.info(''' %s = %s''' ,key ,value)
                    writer.write('''%s = %s\n''' % (key, value))
            results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
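# Illustrative invocation of this script (paths and task name are hypothetical;
# the flags come from the ModelArguments/DataTrainingArguments/TrainingArguments
# dataclasses parsed above):
#   python run_multiple_choice.py \
#     --task_name swag --model_name_or_path bert-base-uncased \
#     --data_dir ./data/swag --output_dir ./out \
#     --do_train --do_eval --max_seq_length 128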
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = '''owlvit_text_model'''
    def __init__(self , vocab_size=49408 , hidden_size=512 , intermediate_size=2048 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , pad_token_id=0 , bos_token_id=49406 , eos_token_id=49407 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : str = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class OwlViTVisionConfig(PretrainedConfig):
    model_type = '''owlvit_vision_model'''
    def __init__(self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=768 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class OwlViTConfig(PretrainedConfig):
    model_type = '''owlvit'''
    is_composition = True
    def __init__(self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6592 , return_dict=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : Optional[Any] = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
    @classmethod
    def from_text_vision_configs(cls , text_config , vision_config , **kwargs ):
        '''simple docstring'''
        config_dict = {}
        config_dict['''text_config'''] = text_config
        config_dict['''vision_config'''] = vision_config
        return cls.from_dict(config_dict , **kwargs )
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def lowerCamelCase (self ) -> float:
'''simple docstring'''
return 1e-4
def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Dict = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ )
snake_case_ : List[str] = super().generate_dummy_inputs(
processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ )
return {**text_input_dict, **image_input_dict}
@property
def lowerCamelCase (self ) -> int:
'''simple docstring'''
return 14
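# Illustrative composition -- a minimal sketch with hypothetical values; omitted
# fields fall back to the defaults defined above:
#   text_cfg = OwlViTTextConfig(hidden_size=512)
#   vision_cfg = OwlViTVisionConfig(patch_size=32)
#   cfg = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
#   cfg.logit_scale_init_value  # 2.6592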
def binary_exponentiation(a ,n ,mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a ,n - 1 ,mod) * a) % mod
    else:
        b = binary_exponentiation(a ,n // 2 ,mod)
        return (b * b) % mod
# a prime number
p = 701
a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
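# Why b**(p-2) acts as the inverse of b modulo a prime p: Fermat's little
# theorem gives b**(p-1) = 1 (mod p) when p does not divide b, hence
# b * b**(p-2) = 1 (mod p), i.e. b**(p-2) is the modular inverse of b.
# Binary exponentiation evaluates it in O(log p) multiplications instead of
# the O(p) cost of naive repeated multiplication.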
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def a ( self : Any ) -> int:
lowerCAmelCase__ = "ZinengTang/tvlt-base"
lowerCAmelCase__ = tempfile.mkdtemp()
def a ( self : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
return TvltImageProcessor.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE__ )
def a ( self : int , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> str:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE__ )
def a ( self : List[str] ) -> Any:
shutil.rmtree(self.tmpdirname )
def a ( self : Any ) -> Union[str, Any]:
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple ) -> List[Any]:
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = np.ones([12_000] )
lowerCAmelCase__ = feature_extractor(SCREAMING_SNAKE_CASE__ , return_tensors="np" )
lowerCAmelCase__ = processor(audio=SCREAMING_SNAKE_CASE__ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Dict ) -> str:
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = np.ones([3, 224, 224] )
lowerCAmelCase__ = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors="np" )
lowerCAmelCase__ = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : int ) -> Any:
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = np.ones([12_000] )
lowerCAmelCase__ = np.ones([3, 224, 224] )
lowerCAmelCase__ = processor(audio=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
def a ( self : Tuple ) -> Optional[Any]:
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Optional[Any] , _a : int , _a : Optional[Any]=3 , _a : Tuple=32 , _a : Any=3 , _a : Union[str, Any]=10 , _a : Optional[int]=[8, 16, 32, 64] , _a : Union[str, Any]=[1, 1, 2, 1] , _a : Optional[Any]=True , _a : int=True , _a : Tuple="relu" , _a : Optional[Any]=3 , _a : str=None , _a : List[Any]=["stage2", "stage3", "stage4"] , _a : Union[str, Any]=[2, 3, 4] , _a : Dict=1 , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =out_features
_SCREAMING_SNAKE_CASE =out_indices
_SCREAMING_SNAKE_CASE =num_groups
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , _a : Dict , _a : str , _a : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BitForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , _a : Any , _a : str , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(config=_a )
for name, module in model.named_modules():
                if isinstance(_a , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_a : Any , _a : Optional[int] , _a : Tuple ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@require_torch
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
        _SCREAMING_SNAKE_CASE =BitModelTester(self )
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class Direction(enum.Enum):
    '''simple docstring'''
    UP = 0
    DOWN = 1
def lowerCamelCase__ ( lowercase , lowercase="" ):
"""simple docstring"""
sys.stdout.write(str(lowercase ) + end )
sys.stdout.flush()
def lowerCamelCase__ ( lowercase , lowercase , lowercase="" ):
"""simple docstring"""
forceWrite(F'''\u001b[{color}m{content}\u001b[0m''' , lowercase )
def lowerCamelCase__ ( ):
"""simple docstring"""
forceWrite("\r" )
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
forceWrite(F'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )
def lowerCamelCase__ ( ):
"""simple docstring"""
forceWrite(" " * TERMINAL_WIDTH )
reset_cursor()
def lowerCamelCase__ ( ):
"""simple docstring"""
reset_cursor()
forceWrite("-" * TERMINAL_WIDTH )
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC )
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__( self , vocab_size=None , is_encoder_decoder=True , prefix=None , bos_token_id=None , pad_token_id=None , eos_token_id=None , decoder_start_token_id=None , title_sep=" / " , doc_sep=" // " , n_docs=5 , max_combined_length=300 , retrieval_vector_size=768 , retrieval_batch_size=8 , dataset="wiki_dpr" , dataset_split="train" , index_name="compressed" , index_path=None , passages_path=None , use_dummy_dataset=False , reduce_loss=False , label_smoothing=0.0 , do_deduplication=True , exclude_bos_score=False , do_marginalize=False , output_retrieved=False , use_cache=True , forced_eos_token_id=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            bos_token_id=bos_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , is_encoder_decoder=is_encoder_decoder , prefix=prefix , vocab_size=vocab_size , **kwargs , )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('''question_encoder''' )
        question_encoder_model_type = question_encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''generator''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type , **question_encoder_config )
        self.generator = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator , '''forced_eos_token_id''' , None )
    @classmethod
    def from_question_encoder_generator_configs( cls , question_encoder_config: PretrainedConfig , generator_config: PretrainedConfig , **kwargs ) -> PretrainedConfig:
        """simple docstring"""
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **kwargs )
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''question_encoder'''] = self.question_encoder.to_dict()
        output['''generator'''] = self.generator.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
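# Minimal composition sketch (hypothetical sub-configs; RagConfig requires both):
#   from transformers import DPRConfig, BartConfig
#   rag_cfg = RagConfig.from_question_encoder_generator_configs(
#       DPRConfig(), BartConfig(), n_docs=5, index_name="compressed"
#   )
#   rag_cfg.generator.model_type  # "bart"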
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
"""simple docstring"""
def __init__( self : Tuple , __lowercase : Any , __lowercase : Union[str, Any]=3 , __lowercase : str=32 , __lowercase : List[str]=3 , __lowercase : Union[str, Any]=10 , __lowercase : int=[10, 20, 30, 40] , __lowercase : Any=[1, 1, 2, 1] , __lowercase : Union[str, Any]=True , __lowercase : Optional[Any]=True , __lowercase : str="relu" , __lowercase : int=3 , __lowercase : Any=None , ) -> Optional[int]:
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : Optional[Any] = batch_size
__UpperCAmelCase : Union[str, Any] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : str = embeddings_size
__UpperCAmelCase : Union[str, Any] = hidden_sizes
__UpperCAmelCase : str = depths
__UpperCAmelCase : Any = is_training
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : str = hidden_act
__UpperCAmelCase : Optional[int] = num_labels
__UpperCAmelCase : Dict = scope
__UpperCAmelCase : Any = len(__lowercase )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : str ) -> Optional[int]:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase ( self : int , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = TFResNetModel(config=__lowercase )
__UpperCAmelCase : Any = model(__lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase ( self : Any , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Any ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[Any] = TFResNetForImageClassification(__lowercase )
__UpperCAmelCase : Dict = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Tuple = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a : Union[str, Any] = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
a : List[str] = False
a : List[Any] = False
a : Tuple = False
a : List[Any] = False
a : Union[str, Any] = False
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = TFResNetModelTester(self )
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def UpperCAmelCase ( self : Tuple ) -> List[str]:
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
pass
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowercase )
__UpperCAmelCase : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCAmelCase ( self : Tuple ) -> str:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCAmelCase ( self : Any ) -> List[Any]:
def check_hidden_states_output(__lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Tuple ):
__UpperCAmelCase : Optional[int] = model_class(__lowercase )
__UpperCAmelCase : str = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__UpperCAmelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Dict = self.model_tester.num_stages
self.assertEqual(len(__lowercase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[str] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__UpperCAmelCase : Any = layer_type
__UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Tuple = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = TFResNetModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCamelCase__ ( ):
__UpperCAmelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self : Any ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase : Any = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__UpperCAmelCase : int = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : List[str] = image_processor(images=__lowercase , return_tensors="""tf""" )
# forward pass
__UpperCAmelCase : Tuple = model(**__lowercase )
# verify the logits
__UpperCAmelCase : Optional[int] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
__UpperCAmelCase : Union[str, Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowercase , atol=1e-4 ) )
| 63 |
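The slow test above reduces to a short inference path; a sketch with the public API ("microsoft/resnet-50" is the usual first entry of the pretrained archive list, assumed here since the list itself is not shown):

# Sketch: classify one image with TF ResNet, as the integration test does.
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")  # assumed checkpoint
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000), as asserted above
print(model.config.id2label[int(logits.numpy().argmax())])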
from manim import *
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
        self.wait()
 | 691 | 0 |
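The scene above only defines the animation; rendering happens through manim's runtime. A sketch of a programmatic render, assuming the (undefined) base class `UpperCamelCase__` stands in for manim's `Scene`; the quality settings mirror the usual `manim -pql` preview flags:

# Sketch: render the scene defined above at low quality with a preview window.
from manim import tempconfig

with tempconfig({"quality": "low_quality", "preview": True}):
    A__().render()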
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ : str = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCamelCase_ ( cls ) -> Dict:
SCREAMING_SNAKE_CASE__: Union[str, Any]= TOKEN
HfFolder.save_token(lowerCAmelCase )
@classmethod
def UpperCamelCase_ ( cls ) -> Any:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Dict= BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__: List[str]= BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase , repo_id='''test-config''' , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__: Union[str, Any]= BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Optional[int]= BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__: Optional[int]= BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__: Tuple= BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
def UpperCamelCase_ ( self ) -> Tuple:
CustomConfig.register_for_auto_class()
SCREAMING_SNAKE_CASE__: Optional[int]= CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
SCREAMING_SNAKE_CASE__: Tuple= AutoConfig.from_pretrained(f'{USER}/test-dynamic-config' , trust_remote_code=lowerCAmelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Tuple= GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
SCREAMING_SNAKE_CASE__: List[Any]= c.n_embd + 1 # int
SCREAMING_SNAKE_CASE__: List[Any]= c.resid_pdrop + 1.0 # float
SCREAMING_SNAKE_CASE__: List[Any]= not c.scale_attn_weights # bool
SCREAMING_SNAKE_CASE__: List[str]= c.summary_type + '''foo''' # str
c.update_from_string(
f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(lowerCAmelCase , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(lowerCAmelCase , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCAmelCase , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCAmelCase , c.summary_type , '''mismatch for key: summary_type''' )
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Any= PretrainedConfig()
SCREAMING_SNAKE_CASE__: Union[str, Any]= [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
SCREAMING_SNAKE_CASE__: List[str]= [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase , lowerCAmelCase )]
if len(lowerCAmelCase ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f' {", ".join(lowerCAmelCase )}.' )
def UpperCamelCase_ ( self ) -> List[Any]:
with self.assertRaises(lowerCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
SCREAMING_SNAKE_CASE__: Dict= BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
SCREAMING_SNAKE_CASE__: Optional[int]= BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Tuple:
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE__: Dict= mock.Mock()
SCREAMING_SNAKE_CASE__: int= 500
SCREAMING_SNAKE_CASE__: List[Any]= {}
SCREAMING_SNAKE_CASE__: Optional[Any]= HTTPError
SCREAMING_SNAKE_CASE__: Tuple= {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE__: List[str]= BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase ) as mock_head:
SCREAMING_SNAKE_CASE__: Any= BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            # This checks that the fake head request was indeed called
mock_head.assert_called()
def UpperCamelCase_ ( self ) -> str:
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE__: Dict= BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Optional[int]= AutoConfig.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__: Union[str, Any]= ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
SCREAMING_SNAKE_CASE__: Any= AutoConfig.from_pretrained(lowerCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
SCREAMING_SNAKE_CASE__: Dict= ['''config.42.0.0.json''']
SCREAMING_SNAKE_CASE__: str= 768
configuration.save_pretrained(lowerCAmelCase )
shutil.move(os.path.join(lowerCAmelCase , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase , '''config.42.0.0.json''' ) )
SCREAMING_SNAKE_CASE__: Union[str, Any]= AutoConfig.from_pretrained(lowerCAmelCase )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase_ ( self ) -> List[str]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
SCREAMING_SNAKE_CASE__: int= '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
SCREAMING_SNAKE_CASE__: int= '''v4.0.0'''
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase , return_unused_kwargs=lowerCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
SCREAMING_SNAKE_CASE__: Any= '''v3.0.0'''
SCREAMING_SNAKE_CASE__: Tuple= old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase )
self.assertEqual(old_configuration.hidden_size , 768 )
| 64 |
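Outside the Hub-staging machinery, the core contract the tests above exercise is the local save/load round trip; a minimal sketch:

# Sketch: the save_pretrained / from_pretrained round trip the tests rely on.
import tempfile
from transformers import BertConfig

config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)  # writes config.json
    reloaded = BertConfig.from_pretrained(tmp_dir)
assert reloaded.hidden_size == 32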
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case_ : Any = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
snake_case_ : List[str] = {'''facebook/blenderbot-3B''': 1_28}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = BlenderbotTokenizer
def __init__( self : Dict , _a : str=None , _a : Optional[int]=None , _a : List[str]=None , _a : int="replace" , _a : Dict="<s>" , _a : Optional[Any]="</s>" , _a : Any="</s>" , _a : int="<s>" , _a : int="<unk>" , _a : Optional[int]="<pad>" , _a : Tuple="<mask>" , _a : Tuple=False , _a : Union[str, Any]=True , **_a : List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =getattr(_a , pre_tok_state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =pre_tok_class(**_a )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE ='''post_processor'''
_SCREAMING_SNAKE_CASE =getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_SCREAMING_SNAKE_CASE =json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''sep'''] )
if "cls" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''cls'''] )
_SCREAMING_SNAKE_CASE =False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
_SCREAMING_SNAKE_CASE =trim_offsets
_SCREAMING_SNAKE_CASE =True
if changes_to_apply:
_SCREAMING_SNAKE_CASE =getattr(_a , state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_SCREAMING_SNAKE_CASE =value
def __UpperCamelCase ( self : Optional[Any] , *_a : str , **_a : int ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def __UpperCamelCase ( self : List[Any] , *_a : Optional[int] , **_a : Union[str, Any] ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def __UpperCamelCase ( self : Dict , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> Optional[Any]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Any , _a : "Conversation" ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done within Blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_a )
_SCREAMING_SNAKE_CASE =''' '''.join(_a )
_SCREAMING_SNAKE_CASE =self.encode(_a )
if len(_a ) > self.model_max_length:
_SCREAMING_SNAKE_CASE =input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
 | 691 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__lowerCamelCase )
class __lowercase ( __lowerCamelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
snake_case_ = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
snake_case_ = Features({"""text""": Value("""string""" )} )
snake_case_ = Features({"""labels""": ClassLabel} )
snake_case_ = "text"
snake_case_ = "labels"
def __lowercase ( self : Dict ,A : int ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] ,A ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
UpperCAmelCase__ : Dict = copy.deepcopy(self )
UpperCAmelCase__ : Optional[int] = self.label_schema.copy()
UpperCAmelCase__ : Dict = features[self.label_column]
UpperCAmelCase__ : Optional[int] = label_schema
return task_template
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
| 65 |
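A usage sketch for the template above, assuming it matches `datasets.tasks.TextClassification` (the column names here are ours): the alignment method shown copies the dataset's concrete `ClassLabel` into the template's label schema.

# Sketch: aligning the task template with a dataset's actual features.
from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification(text_column="text", label_column="labels")
aligned = task.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ['neg', 'pos']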
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE ={
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[int] , **_a : str ) -> List[str]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : List[Any] , **_a : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : int , **_a : Optional[Any] ) -> Any:
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
        _SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
 | 691 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Dict = ["torch", "scipy"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch', 'scipy'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch', 'scipy'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch', 'scipy'] )
| 66 |
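The placeholder above exists so that importing the package never fails; only constructing or touching a torch/scipy-dependent object raises. A self-contained sketch of the same pattern (this mini `requires_backends`/`DummyObject` pair is ours, not the library's exact implementation):

import importlib.util

def requires_backends(obj, backends):
    # Fail loudly, naming exactly which optional dependencies are missing.
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{name} requires the missing backend(s): {', '.join(missing)}")

class DummyObject(type):
    # Metaclass: attribute access on the placeholder class triggers the check too.
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)

class NeedsTorchAndScipy(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])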
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCamelCase( a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_SCREAMING_SNAKE_CASE ={
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
_SCREAMING_SNAKE_CASE =f"{src_lang}-{tgt_lang}"
_SCREAMING_SNAKE_CASE =f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=a__ ,exist_ok=a__)
_SCREAMING_SNAKE_CASE =os.path.join(a__ ,'''README.md''')
print(f"Generating {path}")
with open(a__ ,'''w''' ,encoding='''utf-8''') as f:
f.write(a__)
# make sure we are under the root of the project
snake_case_ : Any = Path(__file__).resolve().parent.parent.parent
snake_case_ : Tuple = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
snake_case_ : Union[str, Any] = model_cards_dir / '''allenai''' / model_name
    write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
 | 691 | 0 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list , snake_case__ :list ) -> float:
_validate_point(snake_case__ )
_validate_point(snake_case__ )
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(a - b ) for a, b in zip(snake_case__ , snake_case__ ) ) )
def _validate_point( snake_case__ :list[float] ) -> None:
if point:
if isinstance(snake_case__ , snake_case__ ):
for item in point:
if not isinstance(snake_case__ , (int, float) ):
_lowercase = (
'Expected a list of numbers as input, found '
F"""{type(snake_case__ ).__name__}"""
)
raise TypeError(snake_case__ )
else:
_lowercase = F"""Expected a list of numbers as input, found {type(snake_case__ ).__name__}"""
raise TypeError(snake_case__ )
else:
raise ValueError('Missing an input' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list , snake_case__ :list ) -> float:
_validate_point(snake_case__ )
_validate_point(snake_case__ )
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(x - y ) for x, y in zip(snake_case__ , snake_case__ ) ) )
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 67 |
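Note that both distance helpers above ended up with the same name, so the second (one-expression) definition shadows the first; both compute the same L1 distance, so behavior is unchanged. A quick check against the surviving binding:

# L1 distance between (1, 1, 1) and (2, 2, 2): |1 - 2| summed over 3 axes = 3.0
assert SCREAMING_SNAKE_CASE__([1, 1, 1], [2, 2, 2]) == 3.0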
from typing import TYPE_CHECKING
from ....utils import _LazyModule
snake_case_ : Dict = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 691 | 0 |
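The module above is deliberately a shell: `_LazyModule` defers the heavy import of `TapexTokenizer` until the attribute is first touched. A self-contained sketch of the idea (a simplified stand-in for transformers' `_LazyModule`, not its exact code):

import importlib
from types import ModuleType

class LazyModule(ModuleType):
    """Map exported names to submodules; import a submodule only on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        try:
            submodule = self._class_to_module[attr]
        except KeyError:
            raise AttributeError(attr) from None
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value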
from math import factorial


class Dual:
    """Truncated Taylor (dual) number: a real part plus a list of dual coefficients."""

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("""power must be a positive integer""")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("""differentiate() requires a function as input for func""")
    if not isinstance(position, (float, int)):
        raise ValueError("""differentiate() requires a float as input for position""")
    if not isinstance(order, int):
        raise ValueError("""differentiate() requires an int as input for order""")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
| 68 |
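A worked check of the machinery above: `Dual(9, 1)` represents 9 + E; pushing it through f leaves the k-th Taylor coefficient in `duals[k - 1]`, so the n-th derivative is `duals[n - 1] * n!`. For f(y) = y**6 we have f''(y) = 30 * y**4, so f''(9) = 30 * 6561 = 196830, matching `differentiate(f, 9, 2)`:

# Manual version of differentiate(f, 9, 2): seed, propagate, read off duals[1] * 2!
x = Dual(9, 1)
out = x ** 6  # the same value f above computes via y**2 * y**4
print(out.duals[1] * factorial(2))  # 196830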
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCamelCase( a__):
def wrapper(*a__ ,**a__):
_SCREAMING_SNAKE_CASE =timeit.default_timer()
_SCREAMING_SNAKE_CASE =func(*a__ ,**a__)
_SCREAMING_SNAKE_CASE =timeit.default_timer() - starttime
return delta
_SCREAMING_SNAKE_CASE =func.__name__
return wrapper
def lowerCamelCase( a__ ,a__=100 ,a__=None):
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =seq_shapes or {}
for i in range(a__):
_SCREAMING_SNAKE_CASE ={}
for col_id, (k, v) in enumerate(features.items()):
if isinstance(a__ ,_ArrayXD):
_SCREAMING_SNAKE_CASE =np.random.rand(*v.shape).astype(v.dtype)
elif isinstance(a__ ,datasets.Value):
if v.dtype == "string":
_SCREAMING_SNAKE_CASE ='''The small grey turtle was surprisingly fast when challenged.'''
else:
_SCREAMING_SNAKE_CASE =np.random.randint(10 ,size=1).astype(v.dtype).item()
elif isinstance(a__ ,datasets.Sequence):
while isinstance(a__ ,datasets.Sequence):
_SCREAMING_SNAKE_CASE =v.feature
_SCREAMING_SNAKE_CASE =seq_shapes[k]
_SCREAMING_SNAKE_CASE =np.random.rand(*a__).astype(v.dtype)
_SCREAMING_SNAKE_CASE =data
dummy_data.append((i, example))
return dummy_data
def lowerCamelCase( a__ ,a__ ,a__=100 ,a__=None):
_SCREAMING_SNAKE_CASE =generate_examples(a__ ,num_examples=a__ ,seq_shapes=a__)
with ArrowWriter(features=a__ ,path=a__) as writer:
for key, record in dummy_data:
_SCREAMING_SNAKE_CASE =features.encode_example(a__)
writer.write(a__)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
_SCREAMING_SNAKE_CASE =datasets.Dataset.from_file(filename=a__ ,info=datasets.DatasetInfo(features=a__))
    return dataset
 | 691 | 0 |
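The helpers above lean on datasets' internal `ArrowWriter`; its round trip in isolation looks like this (internal API, mirrored from the calls above; the path and feature names are ours):

# Sketch: write one encoded example to an Arrow file and load it back.
import datasets
from datasets.arrow_writer import ArrowWriter

features = datasets.Features({"text": datasets.Value("string")})
with ArrowWriter(features=features, path="dummy.arrow") as writer:
    writer.write(features.encode_example({"text": "hello"}))
    num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(filename="dummy.arrow", info=datasets.DatasetInfo(features=features))
print(dataset[0])  # {'text': 'hello'}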
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self : str , a_ : Dict , a_ : Optional[Any]=13 , a_ : Union[str, Any]=30 , a_ : str=2 , a_ : Union[str, Any]=3 , a_ : Optional[Any]=True , a_ : Dict=True , a_ : Optional[Any]=32 , a_ : str=5 , a_ : Union[str, Any]=4 , a_ : Optional[int]=37 , a_ : Optional[Any]="gelu" , a_ : Optional[Any]=0.1 , a_ : Any=0.1 , a_ : Optional[Any]=10 , a_ : Optional[Any]=0.02 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = type_sequence_label_size
__snake_case = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def A ( self : str ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , )
return config, pixel_values
def A ( self : Union[str, Any] , a_ : str , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = FlaxViTModel(config=a_ )
__snake_case = model(a_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case = (self.image_size, self.image_size)
__snake_case = (self.patch_size, self.patch_size)
__snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def A ( self : List[str] , a_ : Dict , a_ : str ):
"""simple docstring"""
__snake_case = self.type_sequence_label_size
__snake_case = FlaxViTForImageClassification(config=a_ )
__snake_case = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case = 1
__snake_case = FlaxViTForImageClassification(a_ )
__snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case = model(a_ )
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
        __snake_case , __snake_case = config_and_inputs
__snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def A ( self : str ):
"""simple docstring"""
__snake_case = FlaxViTModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def A ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
__snake_case = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case = self._prepare_for_class(a_ , a_ )
__snake_case = model_class(a_ )
@jax.jit
def model_jitted(a_ : Dict , **a_ : str ):
return model(pixel_values=a_ , **a_ )
with self.subTest("JIT Enabled" ):
__snake_case = model_jitted(**a_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__snake_case = model_jitted(**a_ ).to_tuple()
self.assertEqual(len(a_ ) , len(a_ ) )
for jitted_output, output in zip(a_ , a_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def A ( self : Union[str, Any] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained("google/vit-base-patch16-224" )
__snake_case = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(a_ )
| 69 |
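The slow test above boils down to a few lines; a sketch with the checkpoint named in the test:

# Sketch: ViT-base on a dummy all-ones batch, channel-first as in the test.
import numpy as np
from transformers import FlaxViTModel

model = FlaxViTModel.from_pretrained("google/vit-base-patch16-224")
outputs = model(np.ones((1, 3, 224, 224)))
# (224 / 16)**2 = 196 patches + 1 [CLS] token, hidden size 768 for ViT-base
print(outputs.last_hidden_state.shape)  # (1, 197, 768)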
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Optional[int] , _a : Union[str, Any] , _a : List[str] , _a : List[Any]=None , _a : Optional[Any]=None ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.layer[current_layer](_a , _a , head_mask[current_layer] )
_SCREAMING_SNAKE_CASE =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : List[str] , _a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =BertEncoderWithPabee(_a )
self.init_weights()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : List[str] , _a : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =threshold
def __UpperCamelCase ( self : Dict , _a : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =patience
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.inference_layers_num / self.inference_instances_num
_SCREAMING_SNAKE_CASE =(
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(_a )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs | 691 | 0 |
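# Usage sketch for the PABEE classifier above -- illustrative only. The model here is
# randomly initialised; in practice you would load a checkpoint fine-tuned with the
# accompanying run_glue_with_pabee.py script (script name assumed from the research project).
import torch
from transformers import BertConfig, BertTokenizer

config = BertConfig(num_labels=2)
model = BertForSequenceClassificationWithPabee(config)
model.eval()
model.bert.set_patience(3)  # exit as soon as 3 consecutive layers agree on the label
model.bert.reset_stats()

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
inputs = tokenizer("PABEE exits early once predictions stabilise.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs)[0]
model.bert.log_stats()  # prints the average number of layers actually used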
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 70 |
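# Example invocation of the conversion script above (a sketch; the timm model name
# must exist in the installed timm version, and the dump folder is arbitrary):
#
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-resnetv2-50
#
# Afterwards the exported checkpoint loads like any transformers model:
from transformers import BitForImageClassification, BitImageProcessor

processor = BitImageProcessor.from_pretrained("./bit-resnetv2-50")
model = BitForImageClassification.from_pretrained("./bit-resnetv2-50")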
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 691 | 0 |
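# Why the _LazyModule wiring above matters, in one hedged sketch: attribute access
# triggers the submodule import, so importing the package stays cheap until a
# torch-backed symbol is actually touched.
import importlib

mod = importlib.import_module("transformers.models.table_transformer")
config_cls = mod.TableTransformerConfig  # first access imports configuration_table_transformer
print(config_cls())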
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sep_token="<::::>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 71 |
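# Usage sketch for the tokenizer above. The checkpoint id comes from
# PRETRAINED_VOCAB_FILES_MAP; requires sentencepiece and network access.
from transformers import BertGenerationTokenizer

tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
ids = tok("SentencePiece splits rare words into subword pieces.").input_ids
print(tok.convert_ids_to_tokens(ids))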
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a single PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]) | 691 | 0 |
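# End-to-end sketch of what MgpstrProcessor does outside the unit tests above.
# Kept as comments because the checkpoint id is assumed (the public MGP-STR base
# model); adjust if the hub id differs:
#
#   from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   outputs = model(pixel_values)
#   text = processor.batch_decode(outputs.logits)["generated_text"]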
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 72 |
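# Minimal sketch instantiating the config above; a shallow model for smoke testing
# (RoCBertModel assumed available in the same transformers version):
from transformers import RoCBertConfig, RoCBertModel

config = RoCBertConfig(num_hidden_layers=2)
model = RoCBertModel(config)
print(config.enable_pronunciation, config.enable_shape, config.concat_input)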
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of current worldwide COVID-19 statistics."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n") | 691 | 0 |
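# The scraper above depends on worldometers' markup staying stable; a hedged, more
# defensive variant might guard the request and fall back gracefully:
import requests
from bs4 import BeautifulSoup


def safe_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    try:
        resp = requests.get(url, timeout=10)
        resp.raise_for_status()
    except requests.RequestException as err:
        return {"error": str(err)}
    soup = BeautifulSoup(resp.text, "html.parser")
    keys = soup.find_all("h1") + soup.find_all("span", {"class": "panel-title"})
    values = soup.find_all("div", {"class": "maincounter-number"}) + soup.find_all(
        "div", {"class": "number-table-main"}
    )
    return {k.text.strip(): v.text.strip() for k, v in zip(keys, values)}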
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
return values
| 73 |
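# Hedged usage sketch for deprecate() above, mirroring how diffusers typically
# retires a keyword argument. Function and argument names are made up for
# illustration; the far-future version string avoids the hard ValueError:
def resize(image, size=None, **kwargs):
    width = deprecate("width", "99.0.0", "Use `size` instead.", take_from=kwargs)
    if width is not None:  # caller still used the old keyword
        size = width
    return image, size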
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)

def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)

def flip_bit(number: int, position: int) -> int:
    """Toggle the bit at `position` of `number`."""
    return number ^ (1 << position)

def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1

def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod() | 691 | 0 |
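# A short self-check of the helpers above (separate from the doctest hook; the
# expected values were verified by hand against 0b1010 == 10):
def _demo() -> None:
    number = 0b1010
    assert set_bit(number, 2) == 0b1110
    assert clear_bit(number, 1) == 0b1000
    assert flip_bit(number, 0) == 0b1011
    assert is_bit_set(number, 3) is True
    assert get_bit(number, 3) == 1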
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []

no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": f'''🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results''',
"""emoji""": True,
},
}
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
            if line.get("duration", None) is not None:
                duration = f"{line['duration']:.4f}"
            if line.get("outcome", "") == "failed":
                section_num_failed += 1
                failed.append([test, duration, log.name.split("_")[0]])
                total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
    action_button = {
        "type": "section",
        "text": {
            "type": "mrkdwn",
            "text": "*For more details:*",
        },
        "accessory": {
            "type": "button",
            "text": {
                "type": "plain_text",
                "text": "Check Action results",
                "emoji": True,
            },
            "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
        },
    }
    payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
| 74 |
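# The parser above consumes pytest's `--report-log` JSONL output. A representative
# (abridged) line -- only the `nodeid`, `duration` and `outcome` fields are read:
#
#   {"nodeid": "tests/test_metrics.py::test_accuracy", "duration": 0.0421, "outcome": "failed"}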
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dpr_ctx_encoder_tokenizer()
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
len(_a ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a ) # check for doc token related keys in dictionary. | 691 | 0 |
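# Hedged helper mirroring the shape contract the tests above assert for
# RagRetriever.retrieve() (retriever construction elided; any factory above works):
import numpy as np


def check_retrieval_shapes(retriever, retrieval_vector_size, n_docs=1):
    hidden_states = np.ones((2, retrieval_vector_size), dtype=np.float32)
    retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
    assert retrieved_doc_embeds.shape == (2, n_docs, retrieval_vector_size)
    assert doc_ids.shape == (2, n_docs)
    assert len(doc_dicts) == 2  # one dict of retrieved passages per query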
'''simple docstring'''
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 75 |
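# Worked example for viterbi() above: the classic Healthy/Fever HMM. Expected output
# verified by hand: ['Healthy', 'Healthy', 'Fever'].
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(observations, states, start_p, trans_p, emit_p))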
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_SCREAMING_SNAKE_CASE =MultilingualCLIP(_a )
_SCREAMING_SNAKE_CASE =text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE ={
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        _SCREAMING_SNAKE_CASE =UNet2DConditionModel(**_a )
return model
@property
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =self.dummy_tokenizer
_SCREAMING_SNAKE_CASE =self.dummy_unet
_SCREAMING_SNAKE_CASE =self.dummy_movq
_SCREAMING_SNAKE_CASE ={
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_SCREAMING_SNAKE_CASE =DDIMScheduler(**_a )
_SCREAMING_SNAKE_CASE ={
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCamelCase ( self : str , _a : int , _a : int=0 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_SCREAMING_SNAKE_CASE =floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _SCREAMING_SNAKE_CASE =Image.fromarray(np.uint8(_a ) ).convert('''RGB''' ).resize((256, 256) )
if str(_a ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
else:
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
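

# --- Usage sketch (not part of the test file above) ---------------------------
# A minimal sketch of the two-stage Kandinsky img2img flow the integration test
# exercises: the prior maps a prompt to CLIP image embeddings, which the img2img
# pipeline then uses to restyle an init image. Model ids and the image URL mirror
# the test; the device choice and strength value are illustrative assumptions.
import torch
from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")  # assumes a CUDA device, as in the GPU-marked test
pipe = KandinskyImg2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")

image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k").to_tuple()
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
# strength < 1.0 keeps part of the original image structure
result = pipe(
    "A red cartoon frog, 4k",
    image=init_image,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    strength=0.3,
).images[0]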
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from the provided command line arguments.

    Returns: ServeCommand
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """
    Expose model information
    """

    infos: dict


class ServeTokenizeResult(BaseModel):
    """
    Tokenize result model
    """

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """
    DeTokenize result model
    """

    text: str


class ServeForwardResult(BaseModel):
    """
    Forward result model
    """

    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.

        Args:
            parser: Root parser to register command-specific arguments
        """
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input and eventually return the corresponding token ids.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided token ids back to readable text.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """
        Run the underlying pipeline on the provided inputs.
        """

        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
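
# --- Usage sketch (not part of the module above) ------------------------------
# A minimal sketch of driving ServeCommand programmatically instead of through
# `transformers-cli serve`. The task and argument values are illustrative.
from argparse import ArgumentParser

parser = ArgumentParser("transformers-cli")
subparsers = parser.add_subparsers()
ServeCommand.register_subcommand(subparsers)
args = parser.parse_args(
    ["serve", "--task", "sentiment-analysis", "--host", "localhost", "--port", "8888", "--workers", "1"]
)
service = args.func(args)  # serve_command_factory builds the pipeline and the ServeCommand
service.run()  # blocks, serving /, /tokenize, /detokenize and /forward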
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            # create a batch of images that all share the maximum resolution
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            # create a batch of images with randomly sampled resolutions
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (4-channel inputs are converted to RGB, hence 3 output channels)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
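

# --- Usage sketch (not part of the test file above) ---------------------------
# What the processor under test does in practice: resize + center-crop +
# normalize an image into a batched pixel_values tensor. The checkpoint name is
# the public ChineseCLIP base model; the random image is a placeholder.
import numpy as np
from PIL import Image

from transformers import ChineseCLIPImageProcessor

processor = ChineseCLIPImageProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.fromarray(np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # e.g. (1, 3, 224, 224) for the default 224x224 output size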
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
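
# --- Usage note (not part of the module above) --------------------------------
# With the _LazyModule pattern, importing the package stays cheap: the heavy,
# torch-backed symbols are only resolved on first attribute access, e.g.:
#
#     from transformers.models.autoformer import AutoformerConfig  # no torch-backed import yet
#     from transformers.models.autoformer import AutoformerModel   # triggers the modeling import
#
# (Illustrative only; assumes transformers is installed with torch available.)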
def and_gate(input_1: int, input_2: int) -> int:
    """AND is 1 only when both inputs are 1, i.e. when neither input is 0."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Tests the and_gate function over the full truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
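

# --- Extension sketch (not part of the original file) -------------------------
# The same tuple-counting trick generalizes to the other basic gates; or_gate
# and nand_gate below are illustrative additions, not part of the source file.
def or_gate(input_1: int, input_2: int) -> int:
    # OR is 1 unless both inputs are 0
    return int((input_1, input_2).count(1) > 0)


def nand_gate(input_1: int, input_2: int) -> int:
    # NAND is the negation of AND
    return int(not and_gate(input_1, input_2))


assert or_gate(0, 0) == 0 and or_gate(1, 0) == 1
assert nand_gate(1, 1) == 0 and nand_gate(0, 1) == 1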
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """
    Creates a mapping function from each choice's string representation to the actual value. Used to support multiple
    value types for a single argument.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
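
# --- Usage sketch (not part of the module above) ------------------------------
# How HfArgumentParser is typically used: declare options as a dataclass and let
# the type hints drive argparse. The TrainingArgs class and values here are
# illustrative, not part of the source file.
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class TrainingArgs:
    model_name: str = field(metadata={"help": "Model checkpoint to fine-tune."})  # required: no default
    learning_rate: float = 3e-5
    fp16: bool = False  # gets the string_to_bool treatment, so "--fp16 true" works
    output_dir: Optional[str] = None


parser = HfArgumentParser(TrainingArgs)
(training_args,) = parser.parse_args_into_dataclasses(
    ["--model_name", "bert-base-uncased", "--learning_rate", "1e-4", "--fp16", "true"]
)
print(training_args.learning_rate, training_args.fp16)  # 0.0001 True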
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the timed test below does not include download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

            # Check metrics
            metrics = load_json(model.metrics_save_path)
            first_step_stats = metrics["val"][0]
            last_step_stats = metrics["val"][-1]
            self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
            assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

            self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
            # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
            self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

            # test learning requirements:

            # 1. BLEU improves over the course of training by more than 2 pts
            self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)

            # 2. BLEU finishes above 17
            self.assertGreater(last_step_stats["val_avg_bleu"], 17)

            # 3. test BLEU and val BLEU within ~1.1 pt.
            self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

            # check lightning ckpt can be loaded and has a reasonable statedict
            contents = os.listdir(output_dir)
            ckpt_name = [x for x in contents if x.endswith(".ckpt")][0]
            full_path = os.path.join(args.output_dir, ckpt_name)
            ckpt = torch.load(full_path, map_location="cpu")
            expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
            assert expected_key in ckpt["state_dict"]
            assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

            # TODO: turn on args.do_predict when PL bug fixed.
            if args.do_predict:
                contents = {os.path.basename(p) for p in contents}
                assert "test_generations.txt" in contents
                assert "test_results.txt" in contents
                # assert len(metrics["val"]) == desired_n_evals
                assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

            # Check metrics
            metrics = load_json(model.metrics_save_path)
            first_step_stats = metrics["val"][0]
            last_step_stats = metrics["val"][-1]
            assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

            assert last_step_stats["val_avg_gen_time"] >= 0.01

            assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
            assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
            assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

            # check lightning ckpt can be loaded and has a reasonable statedict
            contents = os.listdir(output_dir)
            ckpt_name = [x for x in contents if x.endswith(".ckpt")][0]
            full_path = os.path.join(args.output_dir, ckpt_name)
            ckpt = torch.load(full_path, map_location="cpu")
            expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
            assert expected_key in ckpt["state_dict"]
            assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

            # TODO: turn on args.do_predict when PL bug fixed.
            if args.do_predict:
                contents = {os.path.basename(p) for p in contents}
                assert "test_generations.txt" in contents
                assert "test_results.txt" in contents
                # assert len(metrics["val"]) == desired_n_evals
                assert len(metrics["test"]) == 1
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom jieba pre-tokenizer cannot be pickled, swap in the default one
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        # restore the custom jieba pre-tokenizer after unpickling
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # serialize with the default pre-tokenizer, since the jieba one is not serializable
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
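
# --- Usage sketch (not part of the module above) ------------------------------
# The fast RoFormer tokenizer pre-tokenizes Chinese text with jieba before the
# WordPiece step. The checkpoint name is the public base model; running this
# assumes rjieba is installed.
from transformers import RoFormerTokenizerFast

tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
enc = tokenizer("今天天气非常好。", return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(enc.input_ids[0].tolist()))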
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
snake_case_ : Optional[Any] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
snake_case_ : List[str] = Accelerator(kwargs_handlers=[ddp_scaler])
snake_case_ : Dict = torch.nn.Linear(1_00, 2_00)
snake_case_ : List[Any] = accelerator.prepare(model)
# Check the values changed in kwargs
snake_case_ : Dict = ''''''
snake_case_ : str = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 691 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")


@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
"""simple docstring"""
pass
def _a ( self : Any ) -> Any:
"""simple docstring"""
pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
            'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=_lowerCAmelCase , )
| 80 |
class Graph:
    """Weighted undirected graph, stored as an adjacency map."""

    def __init__(self) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex) -> None:
        """Add a vertex to the graph if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge(self, head, tail, weight) -> None:
        """Add an undirected weighted edge; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight(self) -> None:
        """Make all edge weights distinct (Boruvka's algorithm assumes this)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__(self) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")
    def get_edges(self):
        """Return all edges as (tail, head, weight) triples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()
    @staticmethod
    def build(vertices=None, edges=None):
        """Build a graph from a list of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UnionFind:
    """Disjoint-set forest with union by rank and path compression."""

    def __init__(self) -> None:
        self.parent = {}
        self.rank = {}

    def __len__(self) -> int:
        return len(self.parent)

    def make_set(self, item):
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find(self, item):
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]
    def union(self, item1, item2):
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None
    @staticmethod
    def boruvka_mst(graph):
        """Compute a minimum spanning tree of `graph` with Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
return mst | 691 | 0 |
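# Minimal usage sketch for the classes above (vertex labels and weights are
# illustrative assumptions):
#
#   g = Graph.build(vertices=[1, 2, 3, 4], edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3)])
#   g.distinct_weight()            # Boruvka assumes distinct edge weights
#   mst = UnionFind.boruvka_mst(g)
#   print(mst)                     # prints the edges of a minimum spanning tree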
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    """Unit tests for the 0/1 knapsack solver."""

    def test_base_case(self) -> None:
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self) -> None:
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self) -> None:
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
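# The tests above import a sibling `knapsack` module. A minimal recursive
# implementation consistent with the `k.knapsack(cap, w, val, c)` call
# signature used here (a sketch, not necessarily the module's actual source):
#
#   def knapsack(capacity, weights, values, counter):
#       if counter == 0 or capacity == 0:
#           return 0
#       if weights[counter - 1] > capacity:
#           return knapsack(capacity, weights, values, counter - 1)
#       return max(
#           values[counter - 1]
#           + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
#           knapsack(capacity, weights, values, counter - 1),
#       )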
| 81 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config, cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset,
        compute_metrics=compute_metrics, data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)
return results
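# Example invocation (hypothetical local paths; `swag` is assumed to be one of
# the tasks registered in utils_multiple_choice's `processors`):
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag \
#       --output_dir ./swag_output \
#       --do_train --do_eval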
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main() | 691 | 0 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
lowerCamelCase = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
lowerCamelCase = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
    def _compute(self, predictions, references):
        """Return the fraction of predictions equivalent to their references."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
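# Quick check of the equivalence helper this metric relies on (requires the
# hendrycks/math dependency imported at the top of this file):
#
#   assert math_equivalence.is_equiv("\\frac{1}{2}", "1/2")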
| 82 |
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701

a = 1000000000
b = 10

# Here b divides a exactly, so (a // b) % p equals (a * b^(p - 2)) % p:
# by Fermat's little theorem, b^(p - 2) is the modular inverse of b mod p.

# using binary exponentiation function, O(log(p)):
print((a // b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a // b) % p == (a * b ** (p - 2)) % p) | 691 | 0 |
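# Cross-check against Python's built-in three-argument pow, which performs the
# same modular exponentiation:
#
#   assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)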
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory(args: Namespace):
    """Factory used by the CLI to build a ConvertCommand from parsed arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the datasets-cli."""
        train_parser = parser.add_parser(
            '''convert''', help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''')
        train_parser.add_argument(
            '''--tfds_path''', type=str, required=True,
            help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''')
        train_parser.add_argument(
            '''--datasets_directory''', type=str, required=True, help='''Path to the HuggingFace Datasets folder.''')
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger('''datasets-cli/converting''')
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''')

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''')

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f'''Looking at file {f_name}''')
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''')
                continue

            with open(input_file, encoding='''utf-8''') as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
                    out_line = ''''''
continue
elif "from absl import logging" in out_line:
                    out_line = '''from datasets import logging\n'''
elif "getLogger" in out_line:
                    out_line = out_line.replace('''getLogger''', '''get_logger''')
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + '''\n''')
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
continue
else:
for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
                    match = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''', out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(''','''))
                    out_line = '''from . import ''' + match.group(1)
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_lowerCamelCase : Union[str, Any] = f_name.replace('''.py''' , '''''' )
_lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
self._logger.info(f'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(__lowerCAmelCase )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(__lowerCAmelCase )
self._logger.info(f'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace('''.py''', '''''')]
                self._logger.info(f'''Moving {utils_file} to {dest_folder}''')
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''')
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
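# Example usage once this command is wired into the datasets CLI
# (hypothetical local paths):
#
#   datasets-cli convert --tfds_path ./tfds_datasets/my_dataset.py \
#       --datasets_directory ./hf_datasets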
| 83 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True,
        hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4], num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason='''Bit does not output attentions''')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='''Bit does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''Bit does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip(reason='''Bit does not use feedforward chunking''')
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We verify the results below on an image of cute cats
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self) | 691 | 0 |
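# To run just this suite from a transformers checkout (hypothetical path):
#
#   python -m pytest tests/models/bit/test_modeling_bit.py -k "BitModelTest or BitBackboneTest"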
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.']):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split('adaptor.')[-1]
    items = name.split('.')
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                adapter.proj_layer_norm.bias.data = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                adapter.proj_layer_norm.weight.data = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                adapter.proj.bias.data = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                adapter.proj.weight.data = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
    elif isinstance(layer_id, int):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            adapter.layers[layer_id].conv.bias.data = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            'config_yaml': config_yaml_path,
            'data': '/'.join(dict_path.split('/')[:-1]),
            'w2v_path': checkpoint_path,
            'load_pretrained_decoder_from': None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''')
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''')

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'mbart50'
    config['feature_extractor_type'] = 'wav2vec2'
    config['decoder_start_token_id'] = tokenizer.eos_token_id
    config['forced_bos_token_id'] = 250004
    config['forced_eos_token_id'] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=25_0004, type=int, help='''`decoder_start_token_id` of model config''')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
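# Example invocation (hypothetical local paths):
#
#   python convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py \
#       --checkpoint_path ./xlsr_mbart.pt \
#       --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./hf_speech_encoder_decoder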
| 84 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
        Retrieval batch size, defined as the number of queries issued concurrently to the faiss index
        encapsulated by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None,
        eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5,
        max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr",
        dataset_split="train", index_name="compressed", index_path=None, passages_path=None,
        use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True,
        exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True,
        forced_eos_token_id=None, **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('''question_encoder''')
        question_encoder_model_type = question_encoder_config.pop('''model_type''')
        decoder_config = kwargs.pop('''generator''')
        decoder_model_type = decoder_config.pop('''model_type''')

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, '''forced_eos_token_id''', None)
    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate a RagConfig from a question-encoder config and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this instance to a Python dictionary, including the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 691 | 0 |
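# Minimal composition sketch (assumes the standard transformers sub-configs):
#
#   from transformers import BartConfig, DPRConfig, RagConfig
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       DPRConfig(), BartConfig(), n_docs=5
#   )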
from __future__ import annotations
from random import random
class Node:
    """Treap node: a binary search tree by value and a heap by priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return F'''\'{self.value}: {self.prior:.5}\''''
        else:
            return pformat(
                {F'''{self.value}: {self.prior:.5}''': (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + ' '
        left = str(self.left or '')
        right = str(self.right or '')
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (all nodes <= value, all nodes > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def _a ( lowercase__ : Node | None , lowercase__ : Node | None ):
'''simple docstring'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
SCREAMING_SNAKE_CASE__ : Any = merge(left.right , lowercase__ )
return left
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = merge(lowercase__ , right.left )
return right
def _a ( lowercase__ : Node | None , lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = Node(lowercase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = split(lowercase__ , lowercase__ )
return merge(merge(lowercase__ , lowercase__ ) , lowercase__ )
def _a ( lowercase__ : Node | None , lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = split(lowercase__ , value - 1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = split(lowercase__ , lowercase__ )
return merge(lowercase__ , lowercase__ )
def _a ( lowercase__ : Node | None ):
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def _a ( lowercase__ : Node | None , lowercase__ : str ):
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
SCREAMING_SNAKE_CASE__ : List[Any] = insert(lowercase__ , int(arg[1:] ) )
elif arg[0] == "-":
SCREAMING_SNAKE_CASE__ : Any = erase(lowercase__ , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = None
print(
'enter numbers to create a tree, + value to add value into treap, '
'- value to erase all nodes with value. \'q\' to quit. ' )
SCREAMING_SNAKE_CASE__ : List[str] = input()
while args != "q":
SCREAMING_SNAKE_CASE__ : Tuple = interact_treap(lowercase__ , lowercase__ )
print(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[str] = input()
print('good by!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 85 |
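
# Hedged usage sketch for the treap API above (pure stdlib; the values are illustrative
# and the printed order is deterministic even though priorities are random):
#
#   root = None
#   for v in (5, 3, 8, 1):
#       root = insert(root, v)
#   inorder(root)          # prints: 1,3,5,8,
#   root = erase(root, 3)
#   inorder(root)          # prints: 1,5,8,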
from manim import *
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
        self.wait()
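
# Hedged usage note for the Scene subclass above (assumes the manim-community package
# this file imports from): scenes are rendered from the command line, e.g.
#
#   manim -pql <this_file>.py <SceneClassName>
#
# where <SceneClassName> is the name of the class defined above.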
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    """A ViT-style image processor: optional resize, center crop, rescale and normalize."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
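
# Hedged usage sketch for the processor above ("ImageProcessor" is the generic name
# used in this file; the defaults resize and then center-crop to 224x224). Kept as a
# comment so importing this module stays side-effect free:
#
#   import numpy as np
#
#   processor = ImageProcessor()
#   image = np.random.randint(0, 256, size=(3, 300, 400), dtype=np.uint8)
#   batch = processor(image, return_tensors="np")  # BaseImageProcessor.__call__ -> preprocess
#   print(batch["pixel_values"].shape)             # (1, 3, 224, 224)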
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = "  ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
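
# Hedged usage sketch (requires network access to the Hugging Face Hub; the checkpoint
# name comes from the pretrained map above). Kept as a comment because this is a
# library module:
#
#   tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tok("Hello, how are you?")["input_ids"]
#   print(tok.decode(ids))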
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch.nn module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
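
# Hedged usage sketch (assumes torch is installed); kept as a comment so importing this
# module has no side effects:
#
#   import torch
#
#   act = get_activation("silu")
#   print(act(torch.tensor([-1.0, 0.0, 1.0])))  # SiLU applied elementwise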
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # One random channels-first array, converted to a channels-last PIL image.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
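
# Hedged convenience addition (not part of the original suite): lets this file be run
# directly with `python <this_file>.py`; the usual entry point is pytest.
if __name__ == "__main__":
    unittest.main()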
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase__ ( unittest.TestCase ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=400 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=1 / 255 , SCREAMING_SNAKE_CASE=True , ) -> Dict:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowerCamelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = num_channels
_lowerCamelCase : Union[str, Any] = min_resolution
_lowerCamelCase : Optional[int] = max_resolution
_lowerCamelCase : List[Any] = do_resize
_lowerCamelCase : str = size
_lowerCamelCase : Union[str, Any] = do_normalize
_lowerCamelCase : Union[str, Any] = image_mean
_lowerCamelCase : Tuple = image_std
_lowerCamelCase : List[Any] = do_rescale
_lowerCamelCase : Dict = rescale_factor
_lowerCamelCase : Any = do_pad
def UpperCamelCase_ ( self) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> Any:
if not batched:
_lowerCamelCase : Tuple = image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE , Image.Image):
_lowerCamelCase , _lowerCamelCase : Any = image.size
else:
_lowerCamelCase , _lowerCamelCase : int = image.shape[1], image.shape[2]
if w < h:
_lowerCamelCase : Optional[int] = int(self.size["""shortest_edge"""] * h / w)
_lowerCamelCase : Optional[int] = self.size["""shortest_edge"""]
elif w > h:
_lowerCamelCase : Any = self.size["""shortest_edge"""]
_lowerCamelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * w / h)
else:
_lowerCamelCase : Optional[Any] = self.size["""shortest_edge"""]
_lowerCamelCase : Optional[Any] = self.size["""shortest_edge"""]
else:
_lowerCamelCase : Any = []
for image in image_inputs:
_lowerCamelCase , _lowerCamelCase : List[str] = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
_lowerCamelCase : List[Any] = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE: item[0])[0]
_lowerCamelCase : int = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase__ ( A_ ,unittest.TestCase ):
__UpperCAmelCase = ConditionalDetrImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Dict = ConditionalDetrImageProcessingTester(self)
@property
def UpperCamelCase_ ( self) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """image_mean"""))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """image_std"""))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize"""))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize"""))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size"""))
def UpperCamelCase_ ( self) -> int:
_lowerCamelCase : int = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333})
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE)
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84})
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Tuple:
pass
def UpperCamelCase_ ( self) -> Any:
# Initialize image_processing
_lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image)
# Test not batched input
_lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self) -> int:
# Initialize image_processing
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray)
# Test not batched input
_lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
_lowerCamelCase , _lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase : int = image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""").pixel_values
_lowerCamelCase , _lowerCamelCase : Any = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self) -> Optional[Any]:
# Initialize image_processing
_lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor)
# Test not batched input
_lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase : Optional[Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""").pixel_values
_lowerCamelCase , _lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase_ ( self) -> str:
# prepare image and target
_lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""") as f:
_lowerCamelCase : List[Any] = json.loads(f.read())
_lowerCamelCase : Union[str, Any] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
_lowerCamelCase : Any = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""")
_lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , return_tensors="""pt""")
# verify pixel values
_lowerCamelCase : Dict = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["""pixel_values"""].shape , SCREAMING_SNAKE_CASE)
_lowerCamelCase : Tuple = torch.tensor([0.27_96, 0.31_38, 0.34_81])
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4))
# verify area
_lowerCamelCase : List[str] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , SCREAMING_SNAKE_CASE))
# verify boxes
_lowerCamelCase : Union[str, Any] = torch.Size([6, 4])
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , SCREAMING_SNAKE_CASE , atol=1e-3))
# verify image_id
_lowerCamelCase : Union[str, Any] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , SCREAMING_SNAKE_CASE))
# verify is_crowd
_lowerCamelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , SCREAMING_SNAKE_CASE))
# verify class_labels
_lowerCamelCase : Any = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , SCREAMING_SNAKE_CASE))
# verify orig_size
_lowerCamelCase : List[str] = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , SCREAMING_SNAKE_CASE))
# verify size
_lowerCamelCase : Tuple = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , SCREAMING_SNAKE_CASE))
@slow
def UpperCamelCase_ ( self) -> Optional[Any]:
# prepare image, target and masks_path
_lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""") as f:
_lowerCamelCase : Optional[int] = json.loads(f.read())
_lowerCamelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
_lowerCamelCase : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""")
# encode them
_lowerCamelCase : List[str] = ConditionalDetrImageProcessor(format="""coco_panoptic""")
_lowerCamelCase : Dict = image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , masks_path=SCREAMING_SNAKE_CASE , return_tensors="""pt""")
# verify pixel values
_lowerCamelCase : int = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["""pixel_values"""].shape , SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81])
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4))
# verify area
_lowerCamelCase : Union[str, Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , SCREAMING_SNAKE_CASE))
# verify boxes
_lowerCamelCase : int = torch.Size([6, 4])
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , SCREAMING_SNAKE_CASE , atol=1e-3))
# verify image_id
_lowerCamelCase : List[str] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , SCREAMING_SNAKE_CASE))
# verify is_crowd
_lowerCamelCase : Dict = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , SCREAMING_SNAKE_CASE))
# verify class_labels
_lowerCamelCase : str = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , SCREAMING_SNAKE_CASE))
# verify masks
_lowerCamelCase : Any = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , SCREAMING_SNAKE_CASE)
# verify orig_size
_lowerCamelCase : List[Any] = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , SCREAMING_SNAKE_CASE))
# verify size
_lowerCamelCase : Optional[Any] = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , SCREAMING_SNAKE_CASE))
# Usage:
# ./gen-card-allenai-wmt16.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"

    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
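
# Hedged verification sketch (commented out; assumes the loop above has just run):
#
#   for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
#       assert (model_cards_dir / "allenai" / model_name / "README.md").exists()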
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
@dataclass
class _lowerCamelCase:
lowercase_ : Optional[str] = field(
default="""tab_fact""", metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
lowercase_ : Optional[str] = field(
default="""tab_fact""", metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}, )
lowercase_ : int = field(
default=10_24, metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
}, )
lowercase_ : bool = field(
default=_a, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
lowercase_ : bool = field(
default=_a, metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """A csv or a json file containing the training data."""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """A csv or a json file containing the validation data."""} )
lowercase_ : Optional[str] = field(default=_a, metadata={"""help""": """A csv or a json file containing the test data."""} )
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
else:
_lowercase : int = self.train_file.split('.')[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_lowercase : Tuple = self.validation_file.split('.')[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _lowerCamelCase:
lowercase_ : str = field(
default=_a, metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
lowercase_ : bool = field(
default=_a, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""}, )
lowercase_ : str = field(
default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
lowercase_ : bool = field(
default=_a, metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
}, )
def UpperCamelCase_( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowercase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowercase , _lowercase , _lowercase : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowercase , _lowercase , _lowercase : Union[str, Any] = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowercase : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase_ )
datasets.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_lowercase : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowercase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowercase : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_lowercase : Optional[Any] = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_lowercase : Tuple = data_args.train_file.split('.' )[-1]
_lowercase : int = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_lowercase : Any = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
_lowercase : str = load_dataset('csv' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_lowercase : Optional[int] = load_dataset('json' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_lowercase : Optional[Any] = raw_datasets['train'].features['label'].names
_lowercase : Any = len(lowerCamelCase_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowercase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_lowercase : str = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCamelCase_ , )
_lowercase : Tuple = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_lowercase : int = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_lowercase : str = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_lowercase : List[Any] = {'Refused': 0, 'Entailed': 1}
_lowercase : Union[str, Any] = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
_lowercase : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCamelCase_ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCamelCase_ ):
_lowercase : int = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
_lowercase : Any = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
_lowercase : List[Any] = examples['statement']
_lowercase : Optional[Any] = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
_lowercase : Union[str, Any] = tokenizer(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ )
_lowercase : Any = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
_lowercase : str = raw_datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples ) )
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples ) )
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('--do_predict requires a test dataset' )
        predict_dataset = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples ) )
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    if training_args.do_predict:
        logger.info('*** Predict ***' )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('label' )
        predictions = trainer.predict(predict_dataset , metric_key_prefix='predict' ).predictions
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
        if trainer.is_world_process_zero():
            with open(output_predict_file , 'w' ) as writer:
                logger.info('***** Predict Results *****' )
                writer.write('index\tprediction\n' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F'''{index}\t{item}\n''' )
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs), which imports a function named `_mp_fn` from the script
    main()
if __name__ == "__main__":
main()
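# --- Illustrative sketch (editor's addition, not part of the original script) ---
# Shows how the '#'-delimited TabFact table text handled by
# _convert_table_text_to_pandas above becomes a pandas DataFrame.
# The table string below is invented purely for demonstration.
def _demo_table_text_to_pandas():
    import pandas as pd
    _table_text = 'city#population\nparis#2161000\nberlin#3645000\n'
    _table_content = [row.split('#') for row in _table_text.strip('\n').split('\n')]
    # First row is the header, remaining rows are the records
    return pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])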
| 89 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 691 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler ):
    '''simple docstring'''

    def _parse_labels( self , labels ):
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split(''',''' ) if label.strip()]
        return labels
    def __call__( self , sequences , labels , hypothesis_template ):
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError('''You must include at least one label and at least one sequence.''' )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    '''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
                    '''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline(ChunkPipeline ):
    '''simple docstring'''

    def __init__( self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs ):
        self._args_parser = args_parser
        super().__init__(*args , **kwargs )
        if self.entailment_id == -1:
            logger.warning(
                '''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
                '''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )

    @property
    def entailment_id( self ):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith('''entail''' ):
                return ind
        return -1
    def _parse_and_tokenize( self , sequence_pairs , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                '''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
                ''' `pad_token=eos_token`''' )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=truncation , )
        except Exception as e:
            if "too short" in str(e ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs
    def _sanitize_parameters( self , **kwargs ):
        if kwargs.get('''multi_class''' , None ) is not None:
            kwargs['''multi_label'''] = kwargs['''multi_class''']
            logger.warning(
                '''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
                '''`multi_class` will be removed in a future version of Transformers.''' )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params['''multi_label'''] = kwargs['''multi_label''']
        return preprocess_params, {}, postprocess_params
    def __call__( self , sequences , *args , **kwargs ):
        if len(args ) == 0:
            pass
        elif len(args ) == 1 and "candidate_labels" not in kwargs:
            kwargs['''candidate_labels'''] = args[0]
        else:
            raise ValueError(F"""Unable to understand extra arguments {args}""" )
        return super().__call__(sequences , **kwargs )
    def preprocess( self , inputs , candidate_labels=None , hypothesis_template="This example is {}." ):
        sequence_pairs , sequences = self._args_parser(inputs , candidate_labels , hypothesis_template )
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels , sequence_pairs ) ):
            model_input = self._parse_and_tokenize([sequence_pair] )
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels ) - 1,
                **model_input,
            }
    def _forward( self , inputs ):
        candidate_label = inputs['''candidate_label''']
        sequence = inputs['''sequence''']
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs )
        model_outputs = {
            '''candidate_label''': candidate_label,
            '''sequence''': sequence,
            '''is_last''': inputs['''is_last'''],
            **outputs,
        }
        return model_outputs
    def postprocess( self , model_outputs , multi_label=False ):
        candidate_labels = [outputs['''candidate_label'''] for outputs in model_outputs]
        sequences = [outputs['''sequence'''] for outputs in model_outputs]
        logits = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 , keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 , keepdims=True )
        top_inds = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
        }
| 90 |
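# --- Illustrative usage sketch (editor's addition, not from the original file) ---
# Minimal end-to-end call through the zero-shot pipeline above via the public
# `transformers.pipeline` factory. Model name and labels are examples only.
def _demo_zero_shot():
    from transformers import pipeline
    classifier = pipeline('zero-shot-classification', model='facebook/bart-large-mnli')
    result = classifier(
        'The new GPU cuts training time in half.',
        candidate_labels=['hardware', 'cooking', 'politics'],
    )
    # result['labels'] is sorted by result['scores'], highest first
    return result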
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args ,**kwargs):
        starttime =timeit.default_timer()
        func(*args ,**kwargs)
        delta =timeit.default_timer() - starttime
        return delta
    wrapper.__name__ =func.__name__
    return wrapper
def generate_examples(features ,num_examples=100 ,seq_shapes=None):
    dummy_data =[]
    seq_shapes =seq_shapes or {}
    for i in range(num_examples):
        example ={}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v ,_ArrayXD):
                data =np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v ,datasets.Value):
                if v.dtype == "string":
                    data ='''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    data =np.random.randint(10 ,size=1).astype(v.dtype).item()
            elif isinstance(v ,datasets.Sequence):
                while isinstance(v ,datasets.Sequence):
                    v =v.feature
                shape =seq_shapes[k]
                data =np.random.rand(*shape).astype(v.dtype)
            example[k] =data
        dummy_data.append((i, example))
    return dummy_data
def generate_example_dataset(dataset_path ,features ,num_examples=100 ,seq_shapes=None):
    dummy_data =generate_examples(features ,num_examples=num_examples ,seq_shapes=seq_shapes)
    with ArrowWriter(features=features ,path=dataset_path) as writer:
        for key, record in dummy_data:
            example =features.encode_example(record)
            writer.write(example)
    num_final_examples , num_bytes =writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
    dataset =datasets.Dataset.from_file(filename=dataset_path ,info=datasets.DatasetInfo(features=features))
    return dataset
| 691 | 0 |
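# --- Illustrative usage sketch (editor's addition, not part of the benchmark) ---
# Builds a tiny dummy dataset with the helpers above. The feature schema and
# temporary path are invented for demonstration.
def _demo_generate_example_dataset():
    import os, tempfile
    features = datasets.Features(
        {"text": datasets.Value("string"), "score": datasets.Value("float32")}
    )
    path = os.path.join(tempfile.mkdtemp(), "dummy.arrow")
    dataset = generate_example_dataset(path, features, num_examples=10)
    return dataset  # a datasets.Dataset with 10 rows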
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    '''simple docstring'''
    def __init__( self ,short_edge_length ,max_size=sys.maxsize ):
        self.interp_method = 'bilinear'
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__( self ,imgs ):
        img_augs = []
        for img in imgs:
            h , w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] ,self.short_edge_length[1] + 1 )
            if size == 0:
                return img
            scale = size * 1.0 / min(h ,w )
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            if max(newh ,neww ) > self.max_size:
                scale = self.max_size * 1.0 / max(newh ,neww )
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5 )
            newh = int(newh + 0.5 )
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img )
                pil_image = pil_image.resize((neww, newh) ,PILImageResampling.BILINEAR )
                img = np.asarray(pil_image )
            else:
                img = img.permute(2 ,0 ,1 ).unsqueeze(0 )  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img ,(newh, neww) ,mode=self.interp_method ,align_corners=False ).squeeze(0 )
            img_augs.append(img )
        return img_augs
class Preprocess:
    '''simple docstring'''
    def __init__( self ,cfg ):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] ,cfg.INPUT.MAX_SIZE_TEST )
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_MEAN ) ,1 ,1 )
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad( self ,images ):
        max_size = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im ,[0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] ,value=self.pad_value ,)
            for size, im in zip(image_sizes ,images )
        ]
        return torch.stack(images ), torch.tensor(image_sizes )
    def __call__( self ,images ,single_image=False ):
        with torch.no_grad():
            if not isinstance(images ,list ):
                images = [images]
            if single_image:
                assert len(images ) == 1
            for i in range(len(images ) ):
                if isinstance(images[i] ,torch.Tensor ):
                    images.insert(i ,images.pop(i ).to(self.device ).float() )
                elif not isinstance(images[i] ,torch.Tensor ):
                    images.insert(
                        i ,torch.as_tensor(img_tensorize(images.pop(i ) ,input_format=self.input_format ) )
                        .to(self.device )
                        .float() ,)
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images] )
            images = self.aug(images )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x ) for x in images]
            # now pad them to do the following operations
            images , sizes = self.pad(images )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes ,sizes )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box( boxes , scale_yx ):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box( tensor , box_size: Tuple[int, int] ):
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
| 91 |
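# --- Illustrative sketch (editor's addition): clipping a box to an image size ---
# The coordinates below are invented; each row is (x1, y1, x2, y2).
def _demo_clip_box():
    boxes = torch.tensor([[-5.0, 10.0, 120.0, 40.0]])
    _clip_box(boxes, (64, 100))  # image height 64, width 100; modifies in place
    return boxes  # tensor([[0., 10., 100., 40.]])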
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder ):
    def adaptive_forward( self , hidden_states , current_layer , attention_mask=None , head_mask=None ):
        """simple docstring"""
        layer_outputs = self.layer[current_layer](hidden_states , attention_mask , head_mask[current_layer] )
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , BERT_START_DOCSTRING , )
class BertModelWithPabee(BertModel ):
    def __init__( self , config ):
        """simple docstring"""
        super().__init__(config )
        self.encoder = BertEncoderWithPabee(config )
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold( self , threshold ):
        """simple docstring"""
        self.regression_threshold = threshold

    def set_patience( self , patience ):
        """simple docstring"""
        self.patience = patience

    def reset_stats( self ):
        """simple docstring"""
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats( self ):
        """simple docstring"""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , output_dropout=None , output_layers=None , regression=False , ):
        """simple docstring"""
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape , device=device )
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape , dtype=torch.long , device=device )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask , input_shape , device )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size , encoder_sequence_length , _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape , device=device )
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask )
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask , self.config.num_hidden_layers )
        embedding_output = self.embeddings(
            input_ids=input_ids , position_ids=position_ids , token_type_ids=token_type_ids , inputs_embeds=inputs_embeds )
        encoder_outputs = embedding_output
        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers ):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs , current_layer=i , attention_mask=extended_attention_mask , head_mask=head_mask )
                pooled_output = self.pooler(encoder_outputs )
                logits = output_layers[i](output_dropout(pooled_output ) )
                res.append(logits )
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output , attention_mask=extended_attention_mask , head_mask=head_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_extended_attention_mask , )
            pooled_output = self.pooler(encoder_outputs[0] )
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output )]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers ):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs , current_layer=i , attention_mask=extended_attention_mask , head_mask=head_mask )
                pooled_output = self.pooler(encoder_outputs )
                logits = output_layers[i](pooled_output )
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels ) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1 )
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1 )
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels ) ):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , BERT_START_DOCSTRING , )
class BertForSequenceClassificationWithPabee(BertPreTrainedModel ):
    def __init__( self , config ):
        """simple docstring"""
        super().__init__(config )
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , ):
        """simple docstring"""
        logits = self.bert(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits ):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
| 691 | 0 |
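# --- Illustrative sketch (editor's addition): how PABEE's patience rule behaves ---
# With patience=2, inference stops once two consecutive internal classifiers
# agree on the predicted class. The per-layer predictions below are invented.
def _demo_patience_rule(per_layer_predictions=(0, 2, 1, 1, 1), patience=2):
    patient_counter, previous = 0, None
    for layer, pred in enumerate(per_layer_predictions, start=1):
        patient_counter = patient_counter + 1 if pred == previous else 0
        previous = pred
        if patient_counter == patience:
            return layer  # exits at layer 5 for the sample predictions above
    return len(per_layer_predictions)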
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = """▁"""
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = '''<s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''<pad>''' )
        self.assertEqual(len(vocab_keys ) , 1002 )
    def test_vocab_size( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer( self ):
        '''simple docstring'''
        return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
    @slow
    def test_tokenization_base_easy_symbols( self ):
        '''simple docstring'''
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols( self ):
        '''simple docstring'''
        symbols = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model( self ):
        '''simple docstring'''
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ''' '''.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='''pt''' , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=False )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
    @slow
    def test_tokenizer_integration( self ):
        '''simple docstring'''
        # fmt: off
lowercase : Optional[Any] ={'''input_ids''': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
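# --- Illustrative sketch (editor's addition): the '▁' (SPIECE_UNDERLINE) marker ---
# SentencePiece encodes word boundaries as a leading '▁' on each piece, so
# detokenization is just concatenation plus a replace. The pieces are examples.
def _demo_spiece_detokenize(pieces=('▁This', '▁is', '▁a', '▁t', 'est')):
    return ''.join(pieces).replace('▁', ' ').strip()  # -> 'This is a test'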
| 92 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_table_transformer''': [
        '''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TableTransformerConfig''',
        '''TableTransformerOnnxConfig''',
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_table_transformer'''] = [
        '''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TableTransformerForObjectDetection''',
        '''TableTransformerModel''',
        '''TableTransformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 691 | 0 |
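# --- Illustrative note (editor's addition): what _LazyModule gives you ---
# With the structure above, importing the package is cheap; the heavy
# torch-backed submodule is only imported when an attribute is first accessed:
#
#   from transformers import TableTransformerModel  # triggers the real import here
#
# TYPE_CHECKING keeps static analyzers aware of the symbols without paying the
# import cost at runtime.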
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """simple docstring"""

    def __init__( self ):
        '''simple docstring'''
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution( self ):
        '''simple docstring'''
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
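# --- Illustrative usage (editor's addition) ---
# Circularly convolving the default signals [2, 1, 2, -1] and [1, 2, 3, 4]:
#
#   CircularConvolution().circular_convolution()  # -> [10.0, 10.0, 6.0, 14.0]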
| 93 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase ):
    image_processor_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict( self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp( self ):
        """simple docstring"""
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        image_processor_map = {
            '''do_normalize''': False,
            '''do_resize''': True,
            '''image_processor_type''': '''ViTImageProcessor''',
            '''resample''': 3,
            '''size''': {'''height''': 32, '''width''': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_image_processor( self , **kwargs ):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )

    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        """simple docstring"""
        image_input = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )
        image_input = Image.fromarray(np.moveaxis(image_input , 0 , -1 ) )
        return image_input
    def test_save_load_pretrained_default( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , MgpstrTokenizer )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )

    def test_save_load_pretrained_additional_features( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , MgpstrTokenizer )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_image_processor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''test'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''test'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_char_decode( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decode_strs = processor.char_decode(predictions )
        decoded_tok = tokenizer.batch_decode(predictions )
        decoded_tok = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
        self.assertListEqual(decode_strs , decoded_tok )
    def test_model_input_names( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )

    def test_batch_decode( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        char_input = torch.randn(1 , 27 , 38 )
        bpe_input = torch.randn(1 , 27 , 5_0257 )
        wp_input = torch.randn(1 , 27 , 3_0522 )
        results = processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 691 | 0 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
SCREAMING_SNAKE_CASE = parser.parse_args()
SCREAMING_SNAKE_CASE = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
SCREAMING_SNAKE_CASE = CLIPImageProcessor()
SCREAMING_SNAKE_CASE = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
SCREAMING_SNAKE_CASE = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 94 |
import requests
from bs4 import BeautifulSoup

def world_covid19_stats(url = "https://www.worldometers.info/coronavirus"):
    soup =BeautifulSoup(requests.get(url).text ,'''html.parser''')
    keys =soup.findAll('''h1''')
    values =soup.findAll('''div''' ,{'''class''': '''maincounter-number'''})
    keys += soup.findAll('''span''' ,{'''class''': '''panel-title'''})
    values += soup.findAll('''div''' ,{'''class''': '''number-table-main'''})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys ,values)}

if __name__ == "__main__":
    print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
        print(f"""{key}\n{value}\n""")
| 691 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mluke'''] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 95 |
def set_bit(number: int ,position: int) -> int:
    return number | (1 << position)

def clear_bit(number: int ,position: int) -> int:
    return number & ~(1 << position)

def flip_bit(number: int ,position: int) -> int:
    return number ^ (1 << position)

def is_bit_set(number: int ,position: int) -> bool:
    return ((number >> position) & 1) == 1

def get_bit(number: int ,position: int) -> int:
    return int((number & (1 << position)) != 0)
import doctest
    doctest.testmod()
| 691 | 0 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score # noqa: E402 # isort:skip

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path ):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None

def metric_max_over_ground_truths(metric_fn , prediction , ground_truths ):
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
def get_scores(args , preds_path , gold_data_path ):
    hypos = [line.strip() for line in open(preds_path , """r""" ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep="""\t""" , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , """r""" ).readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        f1 += metric_max_over_ground_truths(f1_score , prediction , ground_truths )
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(f'F1: {f1:.2f}' )
    logger.info(f'EM: {em:.2f}' )
def get_precision_at_k(args , preds_path , gold_data_path ):
    k = args.k
    hypos = [line.strip() for line in open(preds_path , """r""" ).readlines()]
    references = [line.strip() for line in open(gold_data_path , """r""" ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split("""\t""" )[:k] )
        ref_provenance = set(reference.split("""\t""" ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(f'Precision@{k}: {em: .2f}' )
def evaluate_batch_retrieval(args , rag_model , questions ):
    def strip_title(title ):
        if title.startswith("""\"""" ):
            title = title[1:]
        if title.endswith("""\"""" ):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors="""pt""" , padding=True , truncation=True , )["""input_ids"""].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs["""title"""]]
        provenance_strings.append("""\t""".join(provenance ) )
    return provenance_strings
def evaluate_batch_e2e(args , rag_model , questions ):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors="""pt""" , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info("""Q: {} - A: {}""".format(q , a ) )
        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=str , help=(
            """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
            """ model_name_or_path"""
        ) , )
    parser.add_argument(
        """--index_name""" , default=None , choices=["""exact""", """compressed""", """legacy"""] , type=str , help="""RAG model retriever type""" , )
    parser.add_argument(
        """--index_path""" , default=None , type=str , help="""Path to the retrieval index""" , )
    parser.add_argument("""--n_docs""" , default=5 , type=int , help="""Number of retrieved docs""" )
    parser.add_argument(
        """--model_name_or_path""" , default=None , type=str , required=True , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
    parser.add_argument(
        """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=str , help=(
            """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
            """ precision@k."""
        ) , )
    parser.add_argument("""--k""" , default=1 , type=int , help="""k for the precision@k calculation""" )
    parser.add_argument(
        """--evaluation_set""" , default=None , type=str , required=True , help="""Path to a file containing evaluation samples""" , )
    parser.add_argument(
        """--gold_data_path""" , default=None , type=str , required=True , help="""Path to a tab-separated file with gold samples""" , )
    parser.add_argument(
        """--gold_data_mode""" , default="""qa""" , type=str , choices=["""qa""", """ans"""] , help=(
            """Format of the gold data file"""
            """qa - a single line in the following format: question [tab] answer_list"""
            """ans - a single line of the gold file contains the expected answer string"""
        ) , )
    parser.add_argument(
        """--predictions_path""" , type=str , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
    parser.add_argument(
        """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
    parser.add_argument(
        """--eval_batch_size""" , default=8 , type=int , help="""Batch size per GPU/CPU for evaluation.""" , )
    parser.add_argument(
        """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
    parser.add_argument(
        """--num_beams""" , default=4 , type=int , help="""Number of beams to be used when generating answers""" , )
    parser.add_argument("""--min_length""" , default=1 , type=int , help="""Min length of the generated answers""" )
    parser.add_argument("""--max_length""" , default=50 , type=int , help="""Max length of the generated answers""" )
    parser.add_argument(
        """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
    parser.add_argument(
        """--print_docs""" , action="""store_true""" , help="""If True, prints docs retrieved while generating.""" , )
    args = parser.parse_args()
    args.device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
    return args
def main(args ):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith("""rag""" ):
        model_class = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
        model_kwargs["""n_docs"""] = args.n_docs
        if args.index_name is not None:
            model_kwargs["""index_name"""] = args.index_name
        if args.index_path is not None:
            model_kwargs["""index_path"""] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("""Evaluate the following checkpoints: %s""" , checkpoints )
    score_fn = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == """e2e""" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info("""***** Running evaluation for {} *****""".format(checkpoint ) )
        logger.info(""" Batch size = %d""" , args.eval_batch_size )
        logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
        if args.model_type.startswith("""rag""" ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write("""\n""".join(answers ) + """\n""" )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write("""\n""".join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
__lowerCamelCase = get_args()
main(args)
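# Example invocation (illustrative only -- the dataset and output paths below are
# placeholders; flags not shown in this excerpt are assumed to be defined above):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/e2e_preds.txt \
#       --eval_mode e2e \
#       --eval_batch_size 8 \
#       --num_beams 4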
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want",
            "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc-token-related keys in the dictionary
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
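# Minimal usage sketch (illustrative): instantiating the deprecated class still
# works but emits a FutureWarning pointing at the replacement class.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       extractor = MobileViTFeatureExtractor()
#   assert any(issubclass(w.category, FutureWarning) for w in caught)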
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
import datasets

from .evaluate import evaluate


_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n    title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n    author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n    booktitle={EMNLP},\n    year={2016}\n}\n'

_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'

_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': the text of the answer\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the SQuAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n    >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> squad_metric = datasets.load_metric("squad")\n    >>> results = squad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
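# Minimal usage sketch (assumes the `datasets` library is installed and this
# metric script is loadable; mirrors the docstring example above):
#
#   import datasets
#   squad_metric = datasets.load_metric("squad")
#   predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
#   references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
#   print(squad_metric.compute(predictions=predictions, references=references))
#   # -> {'exact_match': 100.0, 'f1': 100.0}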
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
def actual_power(a: int, b: int):
    # Divide-and-conquer exponentiation; int(b / 2) truncates toward zero,
    # so the recursion also bottoms out for negative exponents.
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    # For negative exponents, return the reciprocal of the positive power.
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
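# Quick sanity checks (values follow directly from the definitions above):
#   power(2, 10)  -> 1024
#   power(2, 0)   -> 1
#   power(-2, -3) -> -0.125  (i.e. 1 / (-2)**3)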
def and_gate(input_1: int, input_2: int) -> int:
    # Returns 1 only when both inputs are 1, mimicking a logical AND gate.
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
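# Full truth table, generated from the gate itself (illustrative):
#   for a in (0, 1):
#       for b in (0, 1):
#           print(a, b, and_gate(a, b))
# prints: 0 0 0 / 0 1 0 / 1 0 0 / 1 1 1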
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False


try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"


def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.tensor), type(v)
        r[k] = v
    return r
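# Illustrative use of load_checkpoint(): the returned OrderedDict maps parameter
# names to torch tensors, so (assuming the keys match the target model) it can
# feed directly into `model.load_state_dict(load_checkpoint("frcnn.pkl"))`.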
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
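# Minimal usage sketch for Config (assumes a local directory containing a
# config.yaml; the nested keys below are hypothetical):
#   config = Config.from_pretrained("path/to/dir")  # reads path/to/dir/config.yaml
#   print(config.model.dim)                         # nested dicts become Config objects
#   plain = config.to_dict()                        # back to a plain dict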
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
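# The two URL layouts follow directly from the branches above, e.g.:
#   hf_bucket_url("bert-base-uncased", "config.yaml")
#       -> "https://cdn.huggingface.co/bert-base-uncased-config.yaml"
#   hf_bucket_url("user/model", "config.yaml", use_cdn=False)
#       -> "https://s3.amazonaws.com/models.huggingface.co/bert/user/model/config.yaml"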
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
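# Cache filenames are deterministic: the hex SHA-256 of the URL, plus
# ".<sha256(etag)>" when an ETag is available, with ".h5" re-appended for
# HDF5 files so downstream loaders can still recognize the format.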
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_="," ) -> List[str]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
if os.path.isfile(lowerCAmelCase_ ):
with open(lowerCAmelCase_ ) as f:
SCREAMING_SNAKE_CASE__ = eval(f.read() )
else:
SCREAMING_SNAKE_CASE__ = requests.get(lowerCAmelCase_ )
try:
SCREAMING_SNAKE_CASE__ = requests.json()
except Exception:
SCREAMING_SNAKE_CASE__ = req.content.decode()
assert data is not None, "could not connect"
try:
SCREAMING_SNAKE_CASE__ = eval(lowerCAmelCase_ )
except Exception:
SCREAMING_SNAKE_CASE__ = data.split('''\n''' )
req.close()
return data
def __snake_case ( lowerCAmelCase_ ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = requests.get(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def __snake_case ( lowerCAmelCase_ ) -> int:
SCREAMING_SNAKE_CASE__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase_ )
with open(lowerCAmelCase_ , '''rb''' ) as stream:
SCREAMING_SNAKE_CASE__ = pkl.load(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = weights.pop('''model''' )
SCREAMING_SNAKE_CASE__ = {}
for k, v in model.items():
SCREAMING_SNAKE_CASE__ = torch.from_numpy(lowerCAmelCase_ )
if "running_var" in k:
SCREAMING_SNAKE_CASE__ = torch.tensor([0] )
SCREAMING_SNAKE_CASE__ = k.replace('''running_var''' , '''num_batches_tracked''' )
SCREAMING_SNAKE_CASE__ = zero
return new
def __snake_case ( ) -> Any:
print(f'''{os.path.abspath(os.path.join(lowerCAmelCase_ , os.pardir ) )}/demo.ipynb''' )
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_="RGB" ) -> int:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
if os.path.isfile(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = cva.imread(lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = get_image_from_url(lowerCAmelCase_ )
assert img is not None, f'''could not connect to: {im}'''
SCREAMING_SNAKE_CASE__ = cva.cvtColor(lowerCAmelCase_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
SCREAMING_SNAKE_CASE__ = img[:, :, ::-1]
return img
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_=1 ) -> int:
return (images[i : i + batch] for i in range(0 , len(lowerCAmelCase_ ) , lowerCAmelCase_ ))
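# chunk() yields successive batch-sized slices, e.g.
#   list(chunk([1, 2, 3, 4, 5], batch=2)) == [[1, 2], [3, 4], [5]]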
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
    @slow
    @require_torch_gpu
    def test_model_download(self):
        """This warms up the cache so that the next test does not include download time, which varies between machines."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_SCREAMING_SNAKE_CASE =f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_SCREAMING_SNAKE_CASE =['''finetune.py'''] + bash_script.split() + args
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
_SCREAMING_SNAKE_CASE =main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class A__ ( UpperCamelCase__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =f"{self.test_file_dir_str}/test_data/wmt_en_ro"
_SCREAMING_SNAKE_CASE ={
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16''' , '''''' )
_SCREAMING_SNAKE_CASE =6
_SCREAMING_SNAKE_CASE =(
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_SCREAMING_SNAKE_CASE =distill_main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1 | 691 | 0 |
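The two test classes above share the same preprocessing trick: read the example shell script, strip line continuations and the trailing "$@", then substitute placeholder variables. A minimal standalone sketch of that loop, with illustrative values only:

env_vars_to_replace = {"$MAX_LEN": 64, "$BS": 64}
bash_script = 'finetune.py --max_target_length $MAX_LEN --train_batch_size $BS \\\n "$@"'
bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
for k, v in env_vars_to_replace.items():
    bash_script = bash_script.replace(k, str(v))
assert bash_script.split() == ["finetune.py", "--max_target_length", "64", "--train_batch_size", "64"]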
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = """Speech2TextFeatureExtractor"""
_UpperCAmelCase = """Speech2TextTokenizer"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase__ , **lowerCAmelCase__ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('raw_speech' )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('audio' , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop('sampling_rate' , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('text' , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
SCREAMING_SNAKE_CASE_ : Dict = args[0]
SCREAMING_SNAKE_CASE_ : str = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
SCREAMING_SNAKE_CASE_ : List[str] = self.feature_extractor(lowerCAmelCase__ , *lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowerCAmelCase__ , **lowerCAmelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE_ : int = encodings['input_ids']
return inputs
def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@contextmanager
def UpperCamelCase__ ( self ):
"""simple docstring"""
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call).' )
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer
yield
SCREAMING_SNAKE_CASE_ : List[str] = self.feature_extractor
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
| 101 |
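Stripped of the HF plumbing, the __call__ dispatcher above follows a simple contract: audio goes to the feature extractor, text to the tokenizer, and when both are present the token ids are attached to the audio features (under the key labels in the upstream processor, which the obfuscated assignment hides). A hedged sketch:

def call(feature_extractor, tokenizer, audio=None, text=None):
    if audio is None and text is None:
        raise ValueError('You need to specify either an `audio` or `text` input to process.')
    inputs = feature_extractor(audio) if audio is not None else None
    encodings = tokenizer(text) if text is not None else None
    if text is None:
        return inputs
    if audio is None:
        return encodings
    inputs['labels'] = encodings['input_ids']  # key name assumed from the upstream processor
    return inputs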
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = 0
UpperCAmelCase = False
UpperCAmelCase = 3.0
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_a ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_SCREAMING_SNAKE_CASE =Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
_SCREAMING_SNAKE_CASE =accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _a )
@require_multi_gpu
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
snake_case_ : Optional[Any] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
snake_case_ : List[str] = Accelerator(kwargs_handlers=[ddp_scaler])
snake_case_ : Dict = torch.nn.Linear(1_00, 2_00)
snake_case_ : List[Any] = accelerator.prepare(model)
# Check the values changed in kwargs
snake_case_ : Dict = ''''''
snake_case_ : str = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 691 | 0 |
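The MockClass assertions at the top of the test above pin down the to_kwargs() contract: only fields that differ from their dataclass defaults are emitted. A self-contained sketch of that behavior (it mirrors, but is not copied from, the accelerate implementation):

import copy
from dataclasses import dataclass

class KwargsHandler:
    def to_kwargs(self):
        # Compare against a freshly constructed instance to find non-default fields.
        default_dict = self.__class__().__dict__
        this_dict = copy.deepcopy(self.__dict__)
        return {k: v for k, v in this_dict.items() if default_dict[k] != v}

@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0

assert MockClass().to_kwargs() == {}
assert MockClass(a=2, c=2.25).to_kwargs() == {'a': 2, 'c': 2.25}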
"""simple docstring"""
import numpy
class lowercase__ :
"""simple docstring"""
def __init__( self , _A , _A ):
'''simple docstring'''
UpperCamelCase : Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
UpperCamelCase : Optional[Any] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
UpperCamelCase : Union[str, Any] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
UpperCamelCase : Dict = numpy.random.rand(3 , 1 )
# Real output values provided.
UpperCamelCase : Dict = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
UpperCamelCase : Optional[Any] = numpy.zeros(output_array.shape )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Tuple = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
UpperCamelCase : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
UpperCamelCase : Any = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
UpperCamelCase : List[str] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
UpperCamelCase : List[str] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _a ( self , _A , _A , _A ):
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
UpperCamelCase : int = self.feedforward()
self.back_propagation()
if give_loss:
UpperCamelCase : Dict = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def _a ( self , _A ):
'''simple docstring'''
UpperCamelCase : List[Any] = input_arr
UpperCamelCase : int = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
UpperCamelCase : str = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
UpperCamelCase : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
return 1 / (1 + numpy.exp(-value ))
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
return (value) * (1 - (value))
def UpperCamelCase ():
UpperCamelCase : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
    ) , dtype=numpy.float64 , )
# True output values for the given input values.
    UpperCamelCase : Optional[int] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
# Calling neural network class.
UpperCamelCase : Dict = TwoHiddenLayerNeuralNetwork(
input_array=SCREAMING_SNAKE_CASE , output_array=SCREAMING_SNAKE_CASE )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=SCREAMING_SNAKE_CASE , iterations=10 , give_loss=SCREAMING_SNAKE_CASE )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
| 102 |
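Note that the derivative helper above takes the already-activated value, i.e. it computes s * (1 - s) rather than differentiating from scratch. A quick finite-difference check of that identity:

import numpy

x = 0.5
s = 1 / (1 + numpy.exp(-x))   # sigmoid(x)
analytic = s * (1 - s)        # what the derivative helper returns for s
h = 1e-6
numeric = (1 / (1 + numpy.exp(-(x + h))) - s) / h
assert abs(analytic - numeric) < 1e-4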
class A__ :
def __init__( self : List[str] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE ={}
def __UpperCamelCase ( self : Any , _a : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if vertex not in self.adjacency:
_SCREAMING_SNAKE_CASE ={}
self.num_vertices += 1
def __UpperCamelCase ( self : Optional[int] , _a : Tuple , _a : Tuple , _a : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.add_vertex(_a )
self.add_vertex(_a )
if head == tail:
return
_SCREAMING_SNAKE_CASE =weight
_SCREAMING_SNAKE_CASE =weight
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_edges()
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
edges.remove((tail, head, weight) )
for i in range(len(_a ) ):
_SCREAMING_SNAKE_CASE =list(edges[i] )
edges.sort(key=lambda _a : e[2] )
for i in range(len(_a ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_SCREAMING_SNAKE_CASE =edges[i][2] + 1
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
_SCREAMING_SNAKE_CASE =weight
_SCREAMING_SNAKE_CASE =weight
def __str__( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =''''''
for tail in self.adjacency:
for head in self.adjacency[tail]:
_SCREAMING_SNAKE_CASE =self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip('''\n''' )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( _a : List[str]=None , _a : Optional[int]=None ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Graph()
if vertices is None:
_SCREAMING_SNAKE_CASE =[]
if edges is None:
_SCREAMING_SNAKE_CASE =[]
for vertex in vertices:
g.add_vertex(_a )
for edge in edges:
g.add_edge(*_a )
return g
class A__ :
def __init__( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE ={}
def __len__( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return len(self.parent )
def __UpperCamelCase ( self : Dict , _a : Optional[Any] ) -> int:
"""simple docstring"""
if item in self.parent:
return self.find(_a )
_SCREAMING_SNAKE_CASE =item
_SCREAMING_SNAKE_CASE =0
return item
def __UpperCamelCase ( self : str , _a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if item not in self.parent:
return self.make_set(_a )
if item != self.parent[item]:
_SCREAMING_SNAKE_CASE =self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , _a : Optional[int] , _a : List[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.find(_a )
_SCREAMING_SNAKE_CASE =self.find(_a )
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            _SCREAMING_SNAKE_CASE =root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            _SCREAMING_SNAKE_CASE =root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            _SCREAMING_SNAKE_CASE =root1
            return root1
return None
@staticmethod
def __UpperCamelCase ( _a : int ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =graph.num_vertices
_SCREAMING_SNAKE_CASE =Graph.UnionFind()
_SCREAMING_SNAKE_CASE =[]
while num_components > 1:
_SCREAMING_SNAKE_CASE ={}
for vertex in graph.get_vertices():
_SCREAMING_SNAKE_CASE =-1
_SCREAMING_SNAKE_CASE =graph.get_edges()
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
edges.remove((tail, head, weight) )
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
_SCREAMING_SNAKE_CASE =union_find.find(_a )
_SCREAMING_SNAKE_CASE =union_find.find(_a )
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        _SCREAMING_SNAKE_CASE =[head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
_SCREAMING_SNAKE_CASE =[head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =cheap_edge[vertex]
if union_find.find(_a ) != union_find.find(_a ):
union_find.union(_a , _a )
mst_edges.append(cheap_edge[vertex] )
_SCREAMING_SNAKE_CASE =num_components - 1
_SCREAMING_SNAKE_CASE =Graph.build(edges=_a )
return mst | 691 | 0 |
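A hypothetical driver for the Graph class above. The method names are assumptions recovered from the un-obfuscated upstream source, where the static constructor is Graph.build and the MST routine is boruvka_mst; distinct edge weights are used to avoid tie-breaking issues:

g = Graph.build(edges=[(0, 1, 1), (1, 2, 2), (0, 2, 3), (2, 3, 4)])
mst = Graph.boruvka_mst(g)
print(mst)  # a Graph whose edges form a minimum spanning tree of total weight 7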
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
snake_case = random.Random()
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_=1.0 , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Optional[Any]:
if rng is None:
_snake_case = global_rng
_snake_case = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : int=7 , __lowerCamelCase : List[Any]=4_0_0 , __lowerCamelCase : Optional[int]=2_0_0_0 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : List[Any]=1_6_0_0_0 , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[int]=True , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = min_seq_length
_snake_case = max_seq_length
_snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_snake_case = feature_size
_snake_case = padding_value
_snake_case = sampling_rate
_snake_case = return_attention_mask
_snake_case = do_normalize
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[int]=False ):
"""simple docstring"""
def _flatten(__lowerCamelCase : Any ):
return list(itertools.chain(*__lowerCamelCase ) )
if equal_length:
_snake_case = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_snake_case = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_snake_case = [np.asarray(__lowerCamelCase ) for x in speech_inputs]
return speech_inputs
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ):
    A__ : Tuple = Wav2Vec2FeatureExtractor
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = WavaVecaFeatureExtractionTester(self )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(__lowerCamelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(__lowerCamelCase , axis=0 ) - 1 ) < 1E-3 ) )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_snake_case = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_snake_case = [np.asarray(__lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
_snake_case = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
_snake_case = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ) )
# Test batched
_snake_case = feat_extract(__lowerCamelCase , return_tensors='''np''' ).input_values
_snake_case = feat_extract(__lowerCamelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_snake_case = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_snake_case = np.asarray(__lowerCamelCase )
_snake_case = feat_extract(__lowerCamelCase , return_tensors='''np''' ).input_values
_snake_case = feat_extract(__lowerCamelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ) )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_snake_case = ['''longest''', '''max_length''', '''do_not_pad''']
_snake_case = [None, 1_6_0_0, None]
for max_length, padding in zip(__lowerCamelCase , __lowerCamelCase ):
_snake_case = feat_extract(__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors='''np''' )
_snake_case = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = range(8_0_0 , 1_4_0_0 , 2_0_0 )
_snake_case = [floats_list((1, x) )[0] for x in lengths]
_snake_case = ['''longest''', '''max_length''', '''do_not_pad''']
_snake_case = [None, 1_6_0_0, None]
for max_length, padding in zip(__lowerCamelCase , __lowerCamelCase ):
_snake_case = feat_extract(__lowerCamelCase , max_length=__lowerCamelCase , padding=__lowerCamelCase )
_snake_case = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_snake_case = feat_extract(
__lowerCamelCase , truncation=__lowerCamelCase , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
_snake_case = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_snake_case = feat_extract(
__lowerCamelCase , truncation=__lowerCamelCase , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
_snake_case = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
_snake_case = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_snake_case = feat_extract(
__lowerCamelCase , truncation=__lowerCamelCase , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
_snake_case = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
@require_torch
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
import torch
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _snake_case = np.random.rand(1_0_0 ).astype(np.float64 )
_snake_case = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_snake_case = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
_snake_case = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
@slow
@require_torch
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            _snake_case = Wav2Vec2Config.from_pretrained(__lowerCamelCase )
            _snake_case = Wav2Vec2FeatureExtractor.from_pretrained(__lowerCamelCase )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
| 103 |
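The zero-mean/unit-variance helper above is the whole point of do_normalize; the check it performs is equivalent to this standalone snippet on synthetic data (variable names assumed):

import numpy as np

x = np.random.rand(1000)
x_norm = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
assert abs(x_norm.mean()) < 1e-3
assert abs(x_norm.var() - 1) < 1e-3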
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : str = logging.getLogger(__name__)
def lowerCamelCase( a__ ,a__):
return (preds == labels).mean()
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowerCamelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fp16 ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
try:
_SCREAMING_SNAKE_CASE =processors[data_args.task_name]()
_SCREAMING_SNAKE_CASE =processor.get_labels()
_SCREAMING_SNAKE_CASE =len(a__)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=a__ ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=a__ ,cache_dir=model_args.cache_dir ,)
# Get datasets
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
def compute_metrics(a__) -> Dict:
_SCREAMING_SNAKE_CASE =np.argmax(p.predictions ,axis=1)
return {"acc": simple_accuracy(a__ ,p.label_ids)}
# Data collator
    _SCREAMING_SNAKE_CASE =DataCollatorWithPadding(a__ ,pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
_SCREAMING_SNAKE_CASE =Trainer(
model=a__ ,args=a__ ,train_dataset=a__ ,eval_dataset=a__ ,compute_metrics=a__ ,data_collator=a__ ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_SCREAMING_SNAKE_CASE ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''')
_SCREAMING_SNAKE_CASE =trainer.evaluate()
_SCREAMING_SNAKE_CASE =os.path.join(training_args.output_dir ,'''eval_results.txt''')
if trainer.is_world_master():
with open(a__ ,'''w''') as writer:
logger.info('''***** Eval results *****''')
for key, value in result.items():
logger.info(''' %s = %s''' ,a__ ,a__)
writer.write('''%s = %s\n''' % (key, value))
results.update(a__)
return results
def lowerCamelCase( a__):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 691 | 0 |
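main() above is driven entirely by command-line flags parsed into the three dataclasses. A minimal sketch of that entry point with placeholder values (task name and paths are illustrative only):

parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses(
    args=[
        '--model_name_or_path', 'bert-base-uncased',
        '--task_name', 'swag',
        '--data_dir', './data',
        '--output_dir', './out',
        '--do_train', '--do_eval',
    ]
)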
"""simple docstring"""
from functools import lru_cache
def _lowerCamelCase ( UpperCAmelCase_ : int ) -> set:
"""simple docstring"""
A__ = 2
A__ = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(UpperCAmelCase_ )
if n > 1:
factors.add(UpperCAmelCase_ )
return factors
@lru_cache
def _lowerCamelCase ( UpperCAmelCase_ : int ) -> int:
"""simple docstring"""
return len(unique_prime_factors(UpperCAmelCase_ ) )
def _lowerCamelCase ( UpperCAmelCase_ : list ) -> bool:
"""simple docstring"""
return len(set(UpperCAmelCase_ ) ) in (0, 1)
def _lowerCamelCase ( UpperCAmelCase_ : int ) -> list:
"""simple docstring"""
A__ = 2
while True:
# Increment each value of a generated range
A__ = [base + i for i in range(UpperCAmelCase_ )]
        # Run elements through our unique_prime_factors function
# Append our target number to the end.
A__ = [upf_len(UpperCAmelCase_ ) for x in group]
checker.append(UpperCAmelCase_ )
# If all numbers in the list are equal, return the group variable.
if equality(UpperCAmelCase_ ):
return group
# Increment our base variable by 1
base += 1
def _lowerCamelCase ( UpperCAmelCase_ : int = 4 ) -> int:
"""simple docstring"""
A__ = run(UpperCAmelCase_ )
return results[0] if len(UpperCAmelCase_ ) else None
if __name__ == "__main__":
print(solution())
| 104 |
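Assuming the de-obfuscated helper names referenced in the code above (unique_prime_factors, upf_len), the classic Project Euler 47 warm-up values check out:

assert unique_prime_factors(14) == {2, 7}   # 14 = 2 * 7
assert unique_prime_factors(15) == {3, 5}   # 15 = 3 * 5
assert upf_len(644) == 3                    # 644 = 2^2 * 7 * 23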
def lowerCamelCase( a__ ,a__ ,a__):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(a__ ,n - 1 ,a__) * a) % mod
else:
        _SCREAMING_SNAKE_CASE =binary_exponentiation(a__ ,n // 2 ,a__)
return (b * b) % mod
# a prime number
snake_case_ : Union[str, Any] = 7_01
snake_case_ : int = 10_00_00_00_00
snake_case_ : str = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p) | 691 | 0 |
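Both print statements above verify the same identity from Fermat's little theorem: for prime p and b not divisible by p, b**(p - 2) is the modular inverse of b. Python's three-argument pow gives an independent cross-check (pow(b, -1, p) computes the inverse directly on Python >= 3.8):

p, b = 701, 10
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p) == pow(b, -1, p)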
from __future__ import annotations
import typing
from collections import Counter
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> typing.Counter[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(lowerCamelCase_ , max_perimeter + 1 ):
SCREAMING_SNAKE_CASE_ : int = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def __UpperCAmelCase ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = pythagorean_triple(lowerCamelCase_ )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F"""Perimeter {solution()} has maximum solutions""")
| 105 |
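A sanity check on the counter above, assuming the de-obfuscated names pythagorean_triple and solution: perimeter 120 admits exactly three integer right triangles, namely (30, 40, 50), (20, 48, 52) and (24, 45, 51), and the known Project Euler 39 answer for p <= 1000 is 840:

triplets = pythagorean_triple(1000)
assert triplets[120] == 3
assert solution(1000) == 840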
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Optional[Any] , _a : int , _a : Optional[Any]=3 , _a : Tuple=32 , _a : Any=3 , _a : Union[str, Any]=10 , _a : Optional[int]=[8, 16, 32, 64] , _a : Union[str, Any]=[1, 1, 2, 1] , _a : Optional[Any]=True , _a : int=True , _a : Tuple="relu" , _a : Optional[Any]=3 , _a : str=None , _a : List[Any]=["stage2", "stage3", "stage4"] , _a : Union[str, Any]=[2, 3, 4] , _a : Dict=1 , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =out_features
_SCREAMING_SNAKE_CASE =out_indices
_SCREAMING_SNAKE_CASE =num_groups
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , _a : Dict , _a : str , _a : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BitForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , _a : Any , _a : str , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(config=_a )
for name, module in model.named_modules():
                if isinstance(_a , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_a : Any , _a : Optional[int] , _a : Tuple ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@require_torch
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self ) | 691 | 0 |
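The slow integration test above reduces to a standard image-classification forward pass. A hedged end-to-end sketch (google/bit-50 is the first entry of the pretrained archive list; the image path is a placeholder):

import torch
from PIL import Image
from transformers import BitForImageClassification, BitImageProcessor

processor = BitImageProcessor.from_pretrained('google/bit-50')
model = BitForImageClassification.from_pretrained('google/bit-50')
inputs = processor(images=Image.open('cats.png'), return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])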
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case :List[Any] ={
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[str] =[
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Optional[Any] =[
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__snake_case :Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 106 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
snake_case_ : Optional[Any] = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
        Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
        [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None,
        eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5,
        max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr",
        dataset_split="train", index_name="compressed", index_path=None, passages_path=None,
        use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True,
        exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True,
        forced_eos_token_id=None, **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate a [`RagConfig`] from a question encoder config and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a dict, including the nested question encoder and generator configs."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
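

# A minimal usage sketch, assuming the public `transformers` API: compose the config
# above from a question-encoder config and a generator config via the classmethod.
# The model ids are illustrative assumptions, not requirements of the class.
if __name__ == "__main__":
    from transformers import AutoConfig

    question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    generator_config = AutoConfig.from_pretrained("facebook/bart-large")
    rag_config = RagConfig.from_question_encoder_generator_configs(
        question_encoder_config, generator_config, n_docs=5, index_name="compressed"
    )
    print(rag_config.to_dict()["model_type"])  # -> "rag"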
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5,
                num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')

        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64),
            attention_head_dim=(2, 4), class_embed_type='projection',
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1,
            upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.00085, beta_end=0.012,
            prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            'feature_extractor': feature_extractor,
            'image_encoder': image_encoder.eval(),
            # image noising components
            'image_normalizer': image_normalizer.eval(),
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder.eval(),
            'unet': unet.eval(),
            'scheduler': scheduler,
            'vae': vae.eval(),
        }

        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({'image_embeds': None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png'
        )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy'
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-l-img2img', torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(input_image, 'anime turtle', generator=generator, output_type='np')

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png'
        )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy'
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(input_image, 'anime turtle', generator=generator, output_type='np')

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png'
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, 'anime turtle', num_inference_steps=2, output_type='np')

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
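

# A minimal inference sketch for the pipeline exercised above, assuming a CUDA device
# and the public `diffusers` API; the checkpoint id and prompt are illustrative
# assumptions taken from the integration tests.
if __name__ == "__main__":
    pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
        "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
    ).to("cuda")
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
    )
    result = pipe(init_image, "anime turtle").images[0]
    result.save("anime_turtle.png")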
from manim import *


class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('CPU', font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('GPU', font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('Model', font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)
        model_arr = []
        model_cpu_arr = []
        meta_disk_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *meta_disk_arr)

        ckpt_base = [mem.copy() for i in range(6)]
        ckpt_rect = VGroup(*ckpt_base).arrange(RIGHT, buff=0)
        ckpt_text = Text('Loaded Checkpoint', font_size=24)
        checkpoint = Group(ckpt_rect, ckpt_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(ckpt_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18)
        key_text.move_to([-5, 2.4, 0])
        self.add(key, key_text)

        blue_text = MarkupText(f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18)
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_6 = MarkupText(
            f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_6.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text('Disk', font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_6, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_6))

        step_7 = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_7.move_to([2, 2, 0])

        self.play(Write(step_7, run_time=3))

        self.play(
            FadeOut(checkpoint, ckpt_text, *ckpt_arr, *ckpt_cpu_arr),
        )

        self.wait()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_fast",
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06,
        qkv_bias=True, **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
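

# A minimal usage sketch, assuming the public `transformers` API: instantiate the
# default configuration defined above and build a randomly initialized model from it.
if __name__ == "__main__":
    from transformers import VivitModel

    config = VivitConfig()
    model = VivitModel(config)
    print(config.num_frames, config.tubelet_size)  # 32 [2, 16, 16]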
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
        eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
        mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
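

# A minimal usage sketch, assuming the public `transformers` API and network access;
# note the fast tokenizer only appends EOS (see build_inputs_with_special_tokens above).
if __name__ == "__main__":
    from transformers import BlenderbotTokenizerFast

    tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
    ids = tokenizer(" Hello, how are you?").input_ids
    print(tokenizer.decode(ids))  # e.g. " Hello, how are you?</s>"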
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]",
        eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]",
        mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string with the sentencepiece model."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
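

# A minimal usage sketch, assuming the public `transformers` API and network access:
# sentence pairs get the [CLS] ... [SEP] ... [SEP] layout built above.
if __name__ == "__main__":
    from transformers import AlbertTokenizer

    tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
    encoded = tokenizer("A short premise.", "A short hypothesis.")
    print(tokenizer.convert_ids_to_tokens(encoded.input_ids))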
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Prepare a list of PIL images built from random numpy arrays.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
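

# A minimal usage sketch, assuming the public `transformers` API and network access;
# the checkpoint id and caption are illustrative assumptions.
if __name__ == "__main__":
    import requests
    from transformers import ChineseCLIPProcessor

    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    inputs = processor(text=["一只猫"], images=image, return_tensors="pt", padding=True)
    print(sorted(inputs.keys()))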
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : int ) -> Optional[Any]:
'''simple docstring'''
if len(a__ ) < 2:
return collection
def circle_sort_util(_UpperCamelCase : int , _UpperCamelCase : Any , _UpperCamelCase : Optional[int] ) -> bool:
__UpperCAmelCase : Optional[Any] = False
if low == high:
return swapped
__UpperCAmelCase : List[str] = low
__UpperCAmelCase : Any = high
while left < right:
if collection[left] > collection[right]:
__UpperCAmelCase ,__UpperCAmelCase : Dict = (
collection[right],
collection[left],
)
__UpperCAmelCase : int = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
__UpperCAmelCase ,__UpperCAmelCase : Dict = (
collection[right + 1],
collection[left],
)
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Any = low + int((high - low) / 2 )
__UpperCAmelCase : str = circle_sort_util(a__ , a__ , a__ )
__UpperCAmelCase : Dict = circle_sort_util(a__ , mid + 1 , a__ )
return swapped or left_swap or right_swap
__UpperCAmelCase : List[str] = True
while is_not_sorted is True:
__UpperCAmelCase : List[str] = circle_sort_util(a__ , 0 , len(a__ ) - 1 )
return collection
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase : Tuple = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"
_SCREAMING_SNAKE_CASE =f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
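
    # A minimal sketch of what the lazy pattern above buys, assuming an installed
    # `transformers` package: the torch-backed module is only imported on first
    # attribute access, not at `import transformers` time, e.g.
    #
    #     import transformers
    #     config_cls = transformers.TableTransformerConfig  # triggers the real import
    #     print(config_cls.model_type)  # -> "table-transformer"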
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator: the wrapped call returns how long `func` took, in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
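

# A minimal usage sketch of the helpers above, assuming the public `datasets` API:
# time writing and reloading a small dummy dataset. Path and features are
# illustrative assumptions.
if __name__ == "__main__":
    import tempfile

    features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int64")})

    @get_duration
    def write_and_load(path):
        return generate_example_dataset(path, features, num_examples=100)

    with tempfile.TemporaryDirectory() as tmp_dir:
        print(f"took {write_and_load(tmp_dir + '/dummy.arrow'):.4f}s")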
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
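

# A minimal usage sketch, assuming an interactive terminal; the question text is an
# illustrative assumption, not a prompt used by accelerate itself.
if __name__ == "__main__":
    use_cpu = _ask_field(
        "Do you want to run on CPU only? [yes/NO]: ",
        convert_value=_convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    print(f"use_cpu={use_cpu}")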
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Optional[int] , _a : Union[str, Any] , _a : List[str] , _a : List[Any]=None , _a : Optional[Any]=None ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.layer[current_layer](_a , _a , head_mask[current_layer] )
_SCREAMING_SNAKE_CASE =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : List[str] , _a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =BertEncoderWithPabee(_a )
self.init_weights()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : List[str] , _a : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =threshold
def __UpperCamelCase ( self : Dict , _a : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =patience
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.inference_layers_num / self.inference_instances_num
_SCREAMING_SNAKE_CASE =(
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(_a )
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[Any] , _a : Optional[Any]=None , _a : Optional[int]=None , _a : Any=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : str=None , _a : Any=None , _a : str=None , _a : Optional[Any]=None , _a : Dict=False , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
_SCREAMING_SNAKE_CASE =input_ids.size()
elif inputs_embeds is not None:
_SCREAMING_SNAKE_CASE =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
_SCREAMING_SNAKE_CASE =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
if token_type_ids is None:
_SCREAMING_SNAKE_CASE =torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_SCREAMING_SNAKE_CASE =self.get_extended_attention_mask(_a , _a , _a )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =encoder_hidden_states.size()
_SCREAMING_SNAKE_CASE =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
_SCREAMING_SNAKE_CASE =self.invert_attention_mask(_a )
else:
_SCREAMING_SNAKE_CASE =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_SCREAMING_SNAKE_CASE =self.get_head_mask(_a , self.config.num_hidden_layers )
_SCREAMING_SNAKE_CASE =self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
_SCREAMING_SNAKE_CASE =embedding_output
if self.training:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
_SCREAMING_SNAKE_CASE =self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_SCREAMING_SNAKE_CASE =self.pooler(encoder_outputs[0] )
_SCREAMING_SNAKE_CASE =[output_layers[self.config.num_hidden_layers - 1](_a )]
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](_a )
if regression:
_SCREAMING_SNAKE_CASE =logits.detach()
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
else:
_SCREAMING_SNAKE_CASE =logits.detach().argmax(dim=1 )
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =logits
if patient_counter == self.patience:
break
_SCREAMING_SNAKE_CASE =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top
    of the pooled output), e.g. for GLUE tasks.""",
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    # Losses from deeper classifiers receive proportionally larger weights.
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Build the flat list of input ids for a `Conversation` object."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        # Truncate to the model's maximum context, keeping the most recent tokens.
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
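# Usage sketch (assumes network access to the Hugging Face Hub; the checkpoint
# name comes from the map above):
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   tokenizer("Hello world")["input_ids"]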
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : int , ) -> Optional[int]:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(3, 32, 128)
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
_SCREAMING_SNAKE_CASE ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[Any] , **_a : str ) -> int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Optional[int] , **_a : Tuple ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
_SCREAMING_SNAKE_CASE =Image.fromarray(np.moveaxis(_a , 0 , -1 ) )
return image_input
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.char_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
_SCREAMING_SNAKE_CASE =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 38 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 5_0257 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 3_0522 )
_SCREAMING_SNAKE_CASE =processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
_lowerCAmelCase : Any = ViTImageProcessor if is_vision_available() else None
@property
def __lowercase ( self : str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = (3, 32, 1_28)
SCREAMING_SNAKE_CASE : str = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE : Tuple = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
SCREAMING_SNAKE_CASE : Any = dict(zip(_a , range(len(_a ) ) ) )
SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
SCREAMING_SNAKE_CASE : Dict = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 1_28},
}
SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __lowercase ( self : Optional[Any] , **lowerCAmelCase__ : str ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self : Optional[int] , **lowerCAmelCase__ : Tuple ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self : Tuple ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
SCREAMING_SNAKE_CASE : str = Image.fromarray(np.moveaxis(_a , 0 , -1 ) )
return image_input
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[Any] = MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : str = MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE : Any = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
SCREAMING_SNAKE_CASE : Optional[Any] = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[Any] = MgpstrProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(_a , return_tensors='''np''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[Any] = MgpstrProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE : List[str] = '''test'''
SCREAMING_SNAKE_CASE : Tuple = processor(text=_a )
SCREAMING_SNAKE_CASE : Any = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : int = MgpstrProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE : Optional[Any] = '''test'''
SCREAMING_SNAKE_CASE : Tuple = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Dict = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Union[str, Any] = MgpstrProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE : Union[str, Any] = processor.char_decode(_a )
SCREAMING_SNAKE_CASE : int = tokenizer.batch_decode(_a )
SCREAMING_SNAKE_CASE : Any = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(_a , _a )
def __lowercase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : str = MgpstrProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : str = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : List[Any] = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Tuple = MgpstrProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(1 , 27 , 38 )
SCREAMING_SNAKE_CASE : Tuple = torch.randn(1 , 27 , 5_02_57 )
SCREAMING_SNAKE_CASE : List[Any] = torch.randn(1 , 27 , 3_05_22 )
SCREAMING_SNAKE_CASE : Optional[int] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """
    Return a dict of the key statistic blocks scraped from the Worldometers
    COVID-19 page, pairing each heading with its displayed figure.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
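# The resulting mapping pairs headline labels (for example "Coronavirus Cases:")
# with their displayed figures; exact keys and values depend on the live page at
# scrape time.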
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        # Flat array-backed tree: internal nodes live in st[1:N], leaves in st[N:2N].
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set st[p] = v and refresh every ancestor on the path to the root."""
        p += self.N
        self.st[p] = v

        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Fold fn over the inclusive index range [l, r]."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
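# build() is O(n); update() and query() each walk one root-to-leaf path, so both
# cost O(log n). The demo below cross-checks every segment against functools.reduce.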
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        """Test min/max/sum queries for every possible segment."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)

def clear_bit(number: int, position: int) -> int:
    """Clear the bit at `position` of `number` to 0."""
    return number & ~(1 << position)

def flip_bit(number: int, position: int) -> int:
    """Toggle the bit at `position` of `number`."""
    return number ^ (1 << position)

def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is set."""
    return ((number >> position) & 1) == 1

def get_bit(number: int, position: int) -> int:
    """Return the bit at `position` of `number` (0 or 1)."""
    return int((number & (1 << position)) != 0)
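# Illustration: with number = 0b1101 (13) and position = 1,
# set_bit -> 15 (0b1111), clear_bit -> 13 (bit 1 was already 0),
# flip_bit -> 15, is_bit_set -> False, get_bit -> 0.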
if __name__ == "__main__":
import doctest
    doctest.testmod()
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
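# MAPPING translates fairseq parameter-path fragments to their transformers
# counterparts; the "*" wildcard stands for the encoder layer index and is
# substituted per weight below.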
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.info(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq checkpoint's weights into the transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = HubertForCTC(config)
    else:
        hf_model = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
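# Example invocation (script name and paths are placeholders):
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path /path/to/hubert.pt \
#       --pytorch_dump_folder_path ./hubert-hf --not_finetuned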
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =8
# DPR tok
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_SCREAMING_SNAKE_CASE =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_SCREAMING_SNAKE_CASE ={'''unk_token''': '''<unk>'''}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __UpperCamelCase ( self : List[str] ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Dict ) -> DPRContextEncoderTokenizer:
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __UpperCamelCase ( self : Optional[int] , _a : bool ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dataset''' )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _a ) , )
return retriever
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
_SCREAMING_SNAKE_CASE ={sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(_a , open(_a , '''wb''' ) )
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dpr_ctx_encoder_tokenizer()
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
len(_a ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a ) # check for doc token related keys in dictionary.
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the sklearn Bunch into its feature matrix and target vector.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the IRIS dataset and train/evaluate an XGBoost classifier on it.
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
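# normalize="true" row-normalizes the confusion matrix, so each cell reports the
# fraction of a true class predicted as each label rather than raw counts.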
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = KandinskyImgaImgPipeline
UpperCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image"]
UpperCAmelCase = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
UpperCAmelCase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_SCREAMING_SNAKE_CASE =MultilingualCLIP(_a )
_SCREAMING_SNAKE_CASE =text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE ={
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(**_a )
return model
@property
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =self.dummy_tokenizer
_SCREAMING_SNAKE_CASE =self.dummy_unet
_SCREAMING_SNAKE_CASE =self.dummy_movq
_SCREAMING_SNAKE_CASE ={
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_SCREAMING_SNAKE_CASE =DDIMScheduler(**_a )
_SCREAMING_SNAKE_CASE ={
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCamelCase ( self : str , _a : int , _a : int=0 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_SCREAMING_SNAKE_CASE =floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((256, 256) )
if str(_a ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
else:
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu'''
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =self.pipeline_class(**_a )
_SCREAMING_SNAKE_CASE =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =pipe(**self.get_dummy_inputs(_a ) )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_SCREAMING_SNAKE_CASE ='''A red cartoon frog, 4k'''
_SCREAMING_SNAKE_CASE =KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(_a )
        _SCREAMING_SNAKE_CASE =KandinskyImg2ImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.float16 )
_SCREAMING_SNAKE_CASE =pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =torch.Generator(device='''cpu''' ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_SCREAMING_SNAKE_CASE =pipeline(
_a , image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a ) | 691 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve the direct download link via downloadgram, then return the raw video bytes."""
    base_url = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
    video_url = requests.get(base_url + url).json()[0]["""urls"""][0]["""src"""]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("""Enter Video/IGTV url: """).strip()
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 182 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , _a : Dict , _a : Dict=7 , _a : List[str]=3 , _a : str=18 , _a : Optional[int]=30 , _a : Tuple=400 , _a : Optional[Any]=True , _a : Dict=None , _a : str=True , _a : Tuple=None , _a : Any=True , _a : Any=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , _a : str=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , _a : List[Any]=True , ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =size if size is not None else {'''height''': 224, '''width''': 224}
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =min_resolution
_SCREAMING_SNAKE_CASE =max_resolution
_SCREAMING_SNAKE_CASE =do_resize
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =do_center_crop
_SCREAMING_SNAKE_CASE =crop_size
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =image_mean
_SCREAMING_SNAKE_CASE =image_std
_SCREAMING_SNAKE_CASE =do_convert_rgb
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __UpperCamelCase ( self : Tuple , _a : Optional[Any]=False , _a : str=False , _a : Dict=False ) -> Dict:
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
if torchify:
_SCREAMING_SNAKE_CASE =[torch.from_numpy(_a ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , do_center_crop=_a )
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_a )
_SCREAMING_SNAKE_CASE =3
@property
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , ) | 691 | 0 |
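# Hedged illustration of the behavior the four-channel test above asserts:
# with do_convert_rgb=True an RGBA input is reduced to three channels before
# resize, crop, and normalization. The kwargs mirror the tester defaults in
# this file and are not checked against a pinned transformers release.
import numpy as np
from PIL import Image
from transformers import ChineseCLIPImageProcessor
processor = ChineseCLIPImageProcessor(
    do_resize=True,
    size={"height": 224, "width": 224},
    do_center_crop=True,
    crop_size={"height": 18, "width": 18},
    do_convert_rgb=True,
)
rgba = Image.fromarray(np.random.randint(255, size=(32, 32, 4), dtype=np.uint8), mode="RGBA")
pixel_values = processor(rgba, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])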
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__a = StableDiffusionInpaintPipeline
__a = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__a = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__a = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__a = frozenset([] )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
        __UpperCAmelCase : str = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_a , )
__UpperCAmelCase : Tuple = PNDMScheduler(skip_prk_steps=_a )
torch.manual_seed(0 )
__UpperCAmelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__UpperCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
__UpperCAmelCase : int = CLIPTextModel(_a )
__UpperCAmelCase : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__UpperCAmelCase : Optional[int] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any=0 ):
'''simple docstring'''
__UpperCAmelCase : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
__UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCAmelCase : Any = Image.fromarray(np.uint8(_a ) ).convert("""RGB""" ).resize((64, 64) )
        __UpperCAmelCase : Optional[int] = Image.fromarray(np.uint8(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(_a ).startswith("""mps""" ):
__UpperCAmelCase : Dict = torch.manual_seed(_a )
else:
__UpperCAmelCase : List[str] = torch.Generator(device=_a ).manual_seed(_a )
__UpperCAmelCase : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : str = StableDiffusionInpaintPipeline(**_a )
__UpperCAmelCase : Optional[int] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
__UpperCAmelCase : int = self.get_dummy_inputs(_a )
__UpperCAmelCase : str = sd_pipe(**_a ).images
__UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : int = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__UpperCAmelCase : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__UpperCAmelCase : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
__UpperCAmelCase : Union[str, Any] = """stabilityai/stable-diffusion-2-inpainting"""
__UpperCAmelCase : Dict = StableDiffusionInpaintPipeline.from_pretrained(_a , safety_checker=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
__UpperCAmelCase : List[str] = """Face of a yellow cat, high resolution, sitting on a park bench"""
__UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , output_type="""np""" , )
__UpperCAmelCase : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__UpperCAmelCase : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__UpperCAmelCase : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
__UpperCAmelCase : int = """stabilityai/stable-diffusion-2-inpainting"""
__UpperCAmelCase : int = StableDiffusionInpaintPipeline.from_pretrained(
            _a , torch_dtype=torch.float16 , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
__UpperCAmelCase : Dict = """Face of a yellow cat, high resolution, sitting on a park bench"""
__UpperCAmelCase : Dict = torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , output_type="""np""" , )
__UpperCAmelCase : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__UpperCAmelCase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__UpperCAmelCase : List[Any] = """stabilityai/stable-diffusion-2-inpainting"""
__UpperCAmelCase : Optional[Any] = PNDMScheduler.from_pretrained(_a , subfolder="""scheduler""" )
__UpperCAmelCase : Tuple = StableDiffusionInpaintPipeline.from_pretrained(
            _a , safety_checker=_a , scheduler=_a , torch_dtype=torch.float16 , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
__UpperCAmelCase : Dict = torch.manual_seed(0 )
__UpperCAmelCase : Union[str, Any] = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , num_inference_steps=2 , output_type="""np""" , )
__UpperCAmelCase : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 139 |
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND gate: returns 1 only when both inputs are 1."""
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
assert and_gate(0 ,0) == 0
assert and_gate(0 ,1) == 0
assert and_gate(1 ,0) == 0
assert and_gate(1 ,1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 691 | 0 |
# Pinned dependency table (package name -> version specifier) consumed by the
# diffusers setup tooling.
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
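# Hedged sketch of how a pinned table like `deps` above is typically consumed
# by the diffusers/transformers setup scripts; the helper name and the extras
# example are assumptions for illustration, not part of this file.
def deps_list(*pkgs: str) -> list:
    """Look up the pinned version specifier for each named package."""
    return [deps[pkg] for pkg in pkgs]
# e.g. a test extra could then be assembled as:
# extras_test = deps_list("pytest", "pytest-timeout", "pytest-xdist", "requests-mock")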
| 519 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = '''sshleifer/mar_enro_6_3_student'''
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE =cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_a , )
_SCREAMING_SNAKE_CASE =f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
MarianMTModel.from_pretrained(_a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_SCREAMING_SNAKE_CASE =f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_SCREAMING_SNAKE_CASE =['''finetune.py'''] + bash_script.split() + args
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
_SCREAMING_SNAKE_CASE =main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class A__ ( UpperCamelCase__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =f"{self.test_file_dir_str}/test_data/wmt_en_ro"
_SCREAMING_SNAKE_CASE ={
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16''' , '''''' )
_SCREAMING_SNAKE_CASE =6
_SCREAMING_SNAKE_CASE =(
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_SCREAMING_SNAKE_CASE =distill_main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1 | 691 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class A_ ( UpperCamelCase__ ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Optional[int] ):
super().__init__(*_a , **_a )
if config is None:
assert isinstance(self.model , _a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f""" {self.model.__class__}"""
)
__a = self.model.config
else:
__a = config
__a = data_args
__a = self.config.tgt_vocab_size if isinstance(self.config , _a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
" padding.." )
if self.args.label_smoothing == 0:
__a = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
__a = label_smoothed_nll_loss
def _UpperCAmelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int ):
if self.optimizer is None:
__a = ["bias", "LayerNorm.weight"]
__a = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
__a = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
__a = Adafactor
__a = {"scale_parameter": False, "relative_step": False}
else:
__a = AdamW
__a = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
__a = self.args.learning_rate
if self.sharded_ddp:
__a = OSS(
params=_a , optim=_a , **_a , )
else:
__a = optimizer_cls(_a , **_a )
if self.lr_scheduler is None:
__a = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def _UpperCAmelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str ):
__a = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
__a = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
__a = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
__a = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_a )
return scheduler
def _UpperCAmelCase ( self : Tuple ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _UpperCAmelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
__a = model(**_a , use_cache=_a )[0]
__a = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
__a , __a = model(**_a , labels=_a , use_cache=_a )[:2]
else:
# compute label smoothed loss
__a = model(**_a , use_cache=_a )[0]
__a = torch.nn.functional.log_softmax(_a , dim=-1 )
__a , __a = self.loss_fn(_a , _a , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _UpperCAmelCase ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
__a = inputs.pop("labels" )
__a , __a = self._compute_loss(_a , _a , _a )
return loss
def _UpperCAmelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : nn.Module , __SCREAMING_SNAKE_CASE : Dict[str, Union[torch.Tensor, Any]] , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ):
__a = self._prepare_inputs(_a )
__a = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
__a = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **_a , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
__a = self._pad_tensors_to_max_len(_a , gen_kwargs["max_length"] )
__a = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
__a , __a = self._compute_loss(_a , _a , _a )
__a = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
__a = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
__a = self._pad_tensors_to_max_len(_a , gen_kwargs["max_length"] )
return (loss, logits, labels)
def _UpperCAmelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple ):
__a = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
f""" padded to `max_length`={max_length}""" )
__a = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
__a = tensor
return padded_tensor
| 197 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = 0
UpperCAmelCase = False
UpperCAmelCase = 3.0
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_a ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_SCREAMING_SNAKE_CASE =Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_SCREAMING_SNAKE_CASE =accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _a )
@require_multi_gpu
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_00, 2_00)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 691 | 0 |
import torch
from torch import nn
class lowercase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=1 , __UpperCAmelCase : int=False ) ->str:
"""simple docstring"""
super().__init__()
a = n_token
a = d_embed
a = d_proj
a = cutoffs + [n_token]
a = [0] + self.cutoffs
a = div_val
a = self.cutoffs[0]
a = len(self.cutoffs ) - 1
a = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
a = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
a = nn.Parameter(torch.zeros(self.n_clusters ) )
a = nn.ModuleList()
a = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_a , _a ) ) )
else:
self.out_projs.append(_a )
self.out_layers.append(nn.Linear(_a , _a ) )
else:
for i in range(len(self.cutoffs ) ):
a , a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_a , _a ) ) )
self.out_layers.append(nn.Linear(_a , r_idx - l_idx ) )
a = keep_order
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple ) ->Tuple:
"""simple docstring"""
if proj is None:
a = nn.functional.linear(_a , _a , bias=_a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
a = nn.functional.linear(_a , proj.t().contiguous() )
a = nn.functional.linear(_a , _a , bias=_a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : List[Any]=False ) ->Dict:
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
a = hidden[..., :-1, :].contiguous()
a = labels[..., 1:].contiguous()
a = hidden.view(-1 , hidden.size(-1 ) )
a = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
a = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
a = self._compute_logit(_a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
a = labels != -100
a = torch.zeros_like(_a , dtype=hidden.dtype , device=hidden.device )
a = (
-nn.functional.log_softmax(_a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
a = nn.functional.log_softmax(_a , dim=-1 )
else:
# construct weights and biases
a , a = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
a , a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a = self.out_layers[0].weight[l_idx:r_idx]
a = self.out_layers[0].bias[l_idx:r_idx]
else:
a = self.out_layers[i].weight
a = self.out_layers[i].bias
if i == 0:
a = torch.cat([weight_i, self.cluster_weight] , dim=0 )
a = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_a )
biases.append(_a )
a , a , a = weights[0], biases[0], self.out_projs[0]
a = self._compute_logit(_a , _a , _a , _a )
a = nn.functional.log_softmax(_a , dim=1 )
if labels is None:
a = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
a = torch.zeros_like(_a , dtype=hidden.dtype , device=hidden.device )
a = 0
a = [0] + self.cutoffs
for i in range(len(_a ) - 1 ):
a , a = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
a = (labels >= l_idx) & (labels < r_idx)
a = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
a = labels.index_select(0 , _a ) - l_idx
a = head_logprob.index_select(0 , _a )
a = hidden.index_select(0 , _a )
else:
a = hidden
if i == 0:
if labels is not None:
a = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
a = head_logprob[:, : self.cutoffs[0]]
else:
a , a , a = weights[i], biases[i], self.out_projs[i]
a = self._compute_logit(_a , _a , _a , _a )
a = nn.functional.log_softmax(_a , dim=1 )
a = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
a = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
a = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
a = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , _a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def __lowerCAmelCase ( self : int , __UpperCAmelCase : str ) ->Optional[int]:
"""simple docstring"""
if self.n_clusters == 0:
a = self._compute_logit(_a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(_a , dim=-1 )
else:
# construct weights and biases
a , a = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
a , a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a = self.out_layers[0].weight[l_idx:r_idx]
a = self.out_layers[0].bias[l_idx:r_idx]
else:
a = self.out_layers[i].weight
a = self.out_layers[i].bias
if i == 0:
a = torch.cat([weight_i, self.cluster_weight] , dim=0 )
a = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_a )
biases.append(_a )
a , a , a = weights[0], biases[0], self.out_projs[0]
a = self._compute_logit(_a , _a , _a , _a )
a = hidden.new_empty((head_logit.size(0 ), self.n_token) )
a = nn.functional.log_softmax(_a , dim=1 )
a = [0] + self.cutoffs
for i in range(len(_a ) - 1 ):
a , a = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
a = head_logprob[:, : self.cutoffs[0]]
else:
a , a , a = weights[i], biases[i], self.out_projs[i]
a = self._compute_logit(_a , _a , _a , _a )
a = nn.functional.log_softmax(_a , dim=1 )
a = head_logprob[:, -i] + tail_logprob_i
a = logprob_i
return out
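# Hedged smoke test for the adaptive softmax above (upstream this class is
# Transformer-XL's ProjectedAdaptiveLogSoftmax). It assumes the obfuscated
# names are restored, i.e. a constructor of
# (n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False) and the
# methods forward/log_prob, which is what the method bodies imply; dimensions
# and cutoffs are illustrative, so the snippet stays commented out.
#
# crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[200, 600])
# hidden = torch.randn(2, 5, 64)            # (batch, seq, d_proj)
# labels = torch.randint(0, 1000, (2, 5))   # forward() shifts targets internally
# nll = crit(hidden, labels)                # flattened per-token NLL of length batch * (seq - 1)
# log_probs = crit.log_prob(torch.randn(10, 64))  # (10, n_token) full log-distribution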
| 117 |
class A__ :
def __init__( self : List[str] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE ={}
def __UpperCamelCase ( self : Any , _a : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if vertex not in self.adjacency:
_SCREAMING_SNAKE_CASE ={}
self.num_vertices += 1
def __UpperCamelCase ( self : Optional[int] , _a : Tuple , _a : Tuple , _a : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.add_vertex(_a )
self.add_vertex(_a )
if head == tail:
return
_SCREAMING_SNAKE_CASE =weight
_SCREAMING_SNAKE_CASE =weight
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_edges()
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
edges.remove((tail, head, weight) )
for i in range(len(_a ) ):
_SCREAMING_SNAKE_CASE =list(edges[i] )
edges.sort(key=lambda _a : e[2] )
for i in range(len(_a ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_SCREAMING_SNAKE_CASE =edges[i][2] + 1
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
_SCREAMING_SNAKE_CASE =weight
_SCREAMING_SNAKE_CASE =weight
def __str__( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =''''''
for tail in self.adjacency:
for head in self.adjacency[tail]:
_SCREAMING_SNAKE_CASE =self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip('''\n''' )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( _a : List[str]=None , _a : Optional[int]=None ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Graph()
if vertices is None:
_SCREAMING_SNAKE_CASE =[]
if edges is None:
_SCREAMING_SNAKE_CASE =[]
for vertex in vertices:
g.add_vertex(_a )
for edge in edges:
g.add_edge(*_a )
return g
class A__ :
def __init__( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE ={}
def __len__( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return len(self.parent )
def __UpperCamelCase ( self : Dict , _a : Optional[Any] ) -> int:
"""simple docstring"""
if item in self.parent:
return self.find(_a )
_SCREAMING_SNAKE_CASE =item
_SCREAMING_SNAKE_CASE =0
return item
def __UpperCamelCase ( self : str , _a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if item not in self.parent:
return self.make_set(_a )
if item != self.parent[item]:
_SCREAMING_SNAKE_CASE =self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , _a : Optional[int] , _a : List[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.find(_a )
_SCREAMING_SNAKE_CASE =self.find(_a )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_SCREAMING_SNAKE_CASE =roota
return roota
if self.rank[roota] < self.rank[roota]:
_SCREAMING_SNAKE_CASE =roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_SCREAMING_SNAKE_CASE =roota
return roota
return None
@staticmethod
def __UpperCamelCase ( _a : int ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =graph.num_vertices
_SCREAMING_SNAKE_CASE =Graph.UnionFind()
_SCREAMING_SNAKE_CASE =[]
while num_components > 1:
_SCREAMING_SNAKE_CASE ={}
for vertex in graph.get_vertices():
_SCREAMING_SNAKE_CASE =-1
_SCREAMING_SNAKE_CASE =graph.get_edges()
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
edges.remove((tail, head, weight) )
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
_SCREAMING_SNAKE_CASE =union_find.find(_a )
_SCREAMING_SNAKE_CASE =union_find.find(_a )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_SCREAMING_SNAKE_CASE =[head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_SCREAMING_SNAKE_CASE =[head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =cheap_edge[vertex]
if union_find.find(_a ) != union_find.find(_a ):
union_find.union(_a , _a )
mst_edges.append(cheap_edge[vertex] )
_SCREAMING_SNAKE_CASE =num_components - 1
_SCREAMING_SNAKE_CASE =Graph.build(edges=_a )
return mst | 691 | 0 |
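# Hedged usage sketch for the Borůvka-style MST implementation above (upstream
# this resembles TheAlgorithms' minimum_spanning_tree_boruvka). Method names
# below follow that upstream version (assumed), since the names in this dump
# are obfuscated; weights are illustrative.
#
# g = Graph.build(
#     vertices=["a", "b", "c", "d"],
#     edges=[("a", "b", 1), ("b", "c", 2), ("c", "d", 3), ("a", "d", 4)],
# )
# g.distinct_weight()          # the cheapest-edge rounds assume pairwise-distinct weights
# print(Graph.boruvka_mst(g))  # prints edges in "head -> tail == weight" form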
def heaps(arr: list) -> list:
    '''Return all permutations of `arr`, generated in place with Heap's algorithm.'''
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd: swap the first and last elements
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)
    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 371 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
return (preds == labels).mean()
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
try:
_SCREAMING_SNAKE_CASE =processors[data_args.task_name]()
_SCREAMING_SNAKE_CASE =processor.get_labels()
_SCREAMING_SNAKE_CASE =len(a__)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=a__ ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=a__ ,cache_dir=model_args.cache_dir ,)
# Get datasets
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
    def compute_metrics(p) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
# Data collator
_SCREAMING_SNAKE_CASE =DataCollatorWithPadding(a__ ,pad_to_multiple_of=8) if training_args.fpaa else None
# Initialize our Trainer
_SCREAMING_SNAKE_CASE =Trainer(
model=a__ ,args=a__ ,train_dataset=a__ ,eval_dataset=a__ ,compute_metrics=a__ ,data_collator=a__ ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_SCREAMING_SNAKE_CASE ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''')
_SCREAMING_SNAKE_CASE =trainer.evaluate()
_SCREAMING_SNAKE_CASE =os.path.join(training_args.output_dir ,'''eval_results.txt''')
if trainer.is_world_master():
with open(a__ ,'''w''') as writer:
logger.info('''***** Eval results *****''')
for key, value in result.items():
logger.info(''' %s = %s''' ,a__ ,a__)
writer.write('''%s = %s\n''' % (key, value))
results.update(a__)
return results
def lowerCamelCase( a__):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 691 | 0 |
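# A minimal invocation sketch for this script (the file name `run_multiple_choice.py`
# and the SWAG task are illustrative assumptions, not taken from the code above):
#
#   python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./data/swag \
#     --output_dir ./swag_out \
#     --max_seq_length 128 \
#     --do_train --do_eval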
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language generation pipeline using any model with a causal language modeling head."""

    # Prefix text used to help Transformer-XL and XLNet with short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # TransfoXL tokenizers expect an extra space before punctuation symbols.
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)

        return records
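# A minimal usage sketch of the pipeline class above (the model name is an
# illustrative assumption):
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   out = generator("Hello, I'm a language model,", max_new_tokens=20)
#   print(out[0]["generated_text"])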
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        # floor division keeps n an integer through the recursion
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
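# The two checks above use Fermat's little theorem: for a prime p,
# b**(p-2) % p is the modular inverse of b. A minimal sketch of an iterative
# variant (not part of the original file, shown for illustration only):
def binary_exponentiation_iterative(a, n, mod):
    result = 1
    a %= mod
    while n > 0:
        if n & 1:  # multiply in the current power when the low bit is set
            result = (result * a) % mod
        a = (a * a) % mod
        n >>= 1
    return result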
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance, converting any `GenerationConfig` value to a plain dict so the result is
        JSON-serializable.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
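# A minimal usage sketch (argument values are illustrative):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="./out",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )
#   assert isinstance(args.to_dict(), dict)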
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
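# These tests are normally collected by the transformers test suite, e.g.
# (illustrative command; the path is an assumption):
#
#   python -m pytest tests/models/bit/test_modeling_bit.py -k "BitModelTest"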
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double Linked List Node built specifically for LRU Cache."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double Linked List built specifically for LRU Cache."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Adds the given node to the end of the list (before rear)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Removes and returns the given node from the list; returns None if the node is not connected."""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU Cache to store a given capacity of data."""

    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Returns the value for the input key and updates the Double Linked List; returns None if key is absent."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Sets the value for the input key and updates the Double Linked List."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of LRU Cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
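# A minimal usage sketch of the decorator API above (not part of the original
# module; `fib` is an illustrative example function):
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

# print(fib(30))           # recursive calls are memoized per first argument
# print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)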
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved

        self.do_deduplication = do_deduplication

        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
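# A minimal construction sketch (model choices are illustrative assumptions):
#
#   from transformers import AutoConfig, RagConfig
#   q_cfg = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   g_cfg = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_cfg = RagConfig.from_question_encoder_generator_configs(q_cfg, g_cfg, n_docs=5)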
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
from manim import *
class CheckpointLoadingScene(Scene):  # class name is a descriptive stand-in for the obfuscated original
    def construct(self):
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
        self.wait()
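# A Manim scene like the one above is rendered from the command line, e.g.
# (illustrative; the file and class names are assumptions):
#
#   manim -pql this_file.py CheckpointLoadingScene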
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
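# An illustrative invocation of this conversion script (all paths are placeholders):
#
#   python convert_hifigan.py \
#     --checkpoint_path hifigan.ckpt \
#     --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan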
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it,
        # so we set lstrip to True.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
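# A minimal usage sketch (the checkpoint name comes from the map above):
#
#   tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tok("Hello, how are you?").input_ids  # ends with tok.eos_token_id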
"""simple docstring"""
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Union[str, Any]:
return base * power(a__ , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
_A = int(input("""Enter the base: """).strip())
_A = int(input("""Enter the exponent: """).strip())
_A = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
_A = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
| 182 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE ={
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
self.image_processor_file =os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def get_tokenizer ( self : Optional[int] , **kwargs : str ) -> List[str]:
    """simple docstring"""
    return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_rust_tokenizer ( self : List[Any] , **kwargs : Any ) -> Dict:
    """simple docstring"""
    return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def get_image_processor ( self : int , **kwargs : Optional[Any] ) -> Any:
    """simple docstring"""
    return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
def tearDown ( self : str ) -> Union[str, Any]:
    """simple docstring"""
    shutil.rmtree(self.tmpdirname )
def prepare_image_inputs ( self : int ) -> Optional[Any]:
    """simple docstring"""
    _SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 691 | 0 |
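# Usage sketch for the processor exercised by the tests above; the hub checkpoint
# name is an assumption (the tests build their tokenizer/image processor locally).
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.new("RGB", (224, 224))  # stand-in image; replace with a real photo
inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
# As the tests assert, the batch holds input_ids, token_type_ids, attention_mask, pixel_values.
print(sorted(inputs.keys()))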
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
is_torch_greater_or_equal_than_1_11 = False
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Any = '''ybelkada/fonts'''
def _check_torch_version ( ) -> None:
'''simple docstring'''
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"""Pix2StructImageProcessor. Please upgrade torch.""" )
def torch_extract_patches ( image_tensor : "torch.Tensor" , patch_height : int , patch_width : int ) -> "torch.Tensor":
    '''simple docstring'''
    requires_backends(torch_extract_patches , ["""torch"""] )
_check_torch_version()
__UpperCAmelCase : Dict = image_tensor.unsqueeze(0 )
__UpperCAmelCase : Any = torch.nn.functional.unfold(a__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
__UpperCAmelCase : Optional[Any] = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , a__ , a__ , -1 )
__UpperCAmelCase : int = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def render_text ( text : str , text_size : int = 3_6 , text_color : str = "black" , background_color : str = "white" , left_padding : int = 5 , right_padding : int = 5 , top_padding : int = 5 , bottom_padding : int = 5 , font_bytes : Optional[bytes] = None , font_path : Optional[str] = None , ) -> "Image.Image":
    '''simple docstring'''
    requires_backends(render_text , """vision""" )
# Add new lines so that each line is no more than 80 characters.
__UpperCAmelCase : Optional[int] = textwrap.TextWrapper(width=8_0 )
__UpperCAmelCase : Dict = wrapper.wrap(text=a__ )
__UpperCAmelCase : List[Any] = """\n""".join(a__ )
if font_bytes is not None and font_path is None:
__UpperCAmelCase : Optional[int] = io.BytesIO(a__ )
elif font_path is not None:
__UpperCAmelCase : Optional[int] = font_path
else:
__UpperCAmelCase : int = hf_hub_download(a__ , """Arial.TTF""" )
__UpperCAmelCase : int = ImageFont.truetype(a__ , encoding="""UTF-8""" , size=a__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
__UpperCAmelCase : Dict = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , a__ ) )
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = temp_draw.textbbox((0, 0) , a__ , a__ )
# Create the actual image with a bit of padding around the text.
__UpperCAmelCase : Dict = text_width + left_padding + right_padding
__UpperCAmelCase : List[str] = text_height + top_padding + bottom_padding
__UpperCAmelCase : Optional[int] = Image.new("""RGB""" , (image_width, image_height) , a__ )
__UpperCAmelCase : Union[str, Any] = ImageDraw.Draw(a__ )
draw.text(xy=(left_padding, top_padding) , text=a__ , fill=a__ , font=a__ )
return image
def render_header ( image : np.ndarray , header : str , **kwargs : str ) -> "Image.Image":
    '''simple docstring'''
    requires_backends(render_header , """vision""" )
# Convert to PIL image if necessary
__UpperCAmelCase : Optional[int] = to_pil_image(a__ )
__UpperCAmelCase : Dict = render_text(a__ , **a__ )
__UpperCAmelCase : Union[str, Any] = max(header_image.width , image.width )
__UpperCAmelCase : List[Any] = int(image.height * (new_width / image.width) )
__UpperCAmelCase : List[Any] = int(header_image.height * (new_width / header_image.width) )
__UpperCAmelCase : Any = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
__UpperCAmelCase : Optional[int] = to_numpy_array(a__ )
if infer_channel_dimension_format(a__ ) == ChannelDimension.LAST:
__UpperCAmelCase : int = to_channel_dimension_format(a__ , ChannelDimension.LAST )
return new_image
class Pix2StructImageProcessor ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["""flattened_patches"""]
    def __init__( self : Optional[int] , do_convert_rgb : bool = True , do_normalize : bool = True , patch_size : Dict[str, int] = None , max_patches : int = 2_048 , is_vqa : bool = False , **kwargs : Optional[Any] , ):
        '''simple docstring'''
        super().__init__(**kwargs )
__UpperCAmelCase : str = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
__UpperCAmelCase : str = do_normalize
__UpperCAmelCase : List[str] = do_convert_rgb
__UpperCAmelCase : Optional[Any] = max_patches
__UpperCAmelCase : str = is_vqa
def extract_flattened_patches ( self : str , image : np.ndarray , max_patches : int , patch_size : dict , **kwargs : int ):
'''simple docstring'''
requires_backends(self.extract_flattened_patches , """torch""" )
_check_torch_version()
# convert to torch
__UpperCAmelCase : Dict = to_channel_dimension_format(_a , ChannelDimension.FIRST )
__UpperCAmelCase : Dict = torch.from_numpy(_a )
__UpperCAmelCase ,__UpperCAmelCase : List[str] = patch_size["""height"""], patch_size["""width"""]
__UpperCAmelCase ,__UpperCAmelCase : str = get_image_size(_a )
# maximize scale s.t.
__UpperCAmelCase : Optional[int] = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
__UpperCAmelCase : Any = max(min(math.floor(scale * image_height / patch_height ) , _a ) , 1 )
__UpperCAmelCase : Tuple = max(min(math.floor(scale * image_width / patch_width ) , _a ) , 1 )
__UpperCAmelCase : List[str] = max(num_feasible_rows * patch_height , 1 )
__UpperCAmelCase : Tuple = max(num_feasible_cols * patch_width , 1 )
__UpperCAmelCase : List[str] = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=_a , antialias=_a , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
__UpperCAmelCase : Optional[int] = torch_extract_patches(_a , _a , _a )
__UpperCAmelCase : List[Any] = patches.shape
__UpperCAmelCase : Optional[int] = patches_shape[1]
__UpperCAmelCase : Tuple = patches_shape[2]
__UpperCAmelCase : int = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__UpperCAmelCase : List[str] = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
__UpperCAmelCase : Tuple = torch.arange(_a ).reshape([rows, 1] ).repeat(1 , _a ).reshape([rows * columns, 1] )
__UpperCAmelCase : Optional[Any] = torch.arange(_a ).reshape([1, columns] ).repeat(_a , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
__UpperCAmelCase : Dict = row_ids.to(torch.float32 )
__UpperCAmelCase : Union[str, Any] = col_ids.to(torch.float32 )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__UpperCAmelCase : Optional[int] = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
__UpperCAmelCase : Dict = torch.nn.functional.pad(_a , [0, 0, 0, max_patches - (rows * columns)] ).float()
__UpperCAmelCase : Optional[int] = to_numpy_array(_a )
return result
def normalize ( self : Union[str, Any] , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : str ):
'''simple docstring'''
if image.dtype == np.uint8:
    __UpperCAmelCase : List[str] = image.astype(np.float32 )
# take mean across the whole `image`
__UpperCAmelCase : Union[str, Any] = np.mean(_a )
__UpperCAmelCase : int = np.std(_a )
__UpperCAmelCase : Union[str, Any] = max(_a , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(_a , mean=_a , std=_a , **_a )
def preprocess ( self : Any , images : ImageInput , header_text : Optional[str] = None , do_convert_rgb : bool = None , do_normalize : Optional[bool] = None , max_patches : Optional[int] = None , patch_size : Optional[Dict[str, int]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : Tuple , ):
'''simple docstring'''
__UpperCAmelCase : str = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__UpperCAmelCase : List[str] = patch_size if patch_size is not None else self.patch_size
__UpperCAmelCase : int = max_patches if max_patches is not None else self.max_patches
__UpperCAmelCase : List[Any] = self.is_vqa
if kwargs.get("""data_format""" , _a ) is not None:
raise ValueError("""data_format is not an accepted input as the outputs are """ )
__UpperCAmelCase : Optional[int] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__UpperCAmelCase : Any = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
__UpperCAmelCase : int = [to_numpy_array(_a ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("""A header text must be provided for VQA models.""" )
__UpperCAmelCase : Tuple = kwargs.pop("""font_bytes""" , _a )
__UpperCAmelCase : int = kwargs.pop("""font_path""" , _a )
if isinstance(_a , _a ):
__UpperCAmelCase : Optional[Any] = [header_text] * len(_a )
__UpperCAmelCase : int = [
render_header(_a , header_text[i] , font_bytes=_a , font_path=_a )
for i, image in enumerate(_a )
]
if do_normalize:
__UpperCAmelCase : str = [self.normalize(image=_a ) for image in images]
# convert to torch tensor and permute
__UpperCAmelCase : List[str] = [
self.extract_flattened_patches(image=_a , max_patches=_a , patch_size=_a )
for image in images
]
# create attention mask in numpy
__UpperCAmelCase : List[str] = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
__UpperCAmelCase : List[Any] = BatchFeature(
data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=_a )
return encoded_outputs
| 139 |
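# Minimal usage sketch for the processor class above (the same implementation ships
# in transformers), assuming torch is installed since patch extraction runs through torch.
from PIL import Image
from transformers import Pix2StructImageProcessor

image_processor = Pix2StructImageProcessor(patch_size={"height": 16, "width": 16}, max_patches=2048)
doc_image = Image.new("RGB", (640, 480), "white")  # stand-in for a document screenshot
encoding = image_processor(images=doc_image, return_tensors="pt")
# flattened_patches has shape (1, max_patches, 2 + 16 * 16 * 3): the first two
# features per patch are the 1-indexed row/column ids added above; zero rows are padding.
print(encoding.flattened_patches.shape, encoding.attention_mask.shape)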
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card( model_card_dir ,src_lang ,tgt_lang ,model_name):
_SCREAMING_SNAKE_CASE ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
_SCREAMING_SNAKE_CASE ={
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
_SCREAMING_SNAKE_CASE =f"{src_lang}-{tgt_lang}"
_SCREAMING_SNAKE_CASE =f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=True ,exist_ok=True)
_SCREAMING_SNAKE_CASE =os.path.join(a__ ,'''README.md''')
print(f"Generating {path}")
with open(a__ ,'''w''' ,encoding='''utf-8''') as f:
f.write(a__)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name) | 691 | 0 |
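# The generated card embeds this usage; a standalone version with one of the three
# checkpoints listed above (downloads the model on first run).
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "allenai/wmt16-en-de-12-1"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input_ids = tokenizer.encode("Machine learning is great, isn't it?", return_tensors="pt")
outputs = model.generate(input_ids)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))  # German translation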
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset ( dataset , expected_features ):
    """simple docstring"""
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_sql_keep_in_memory ( keep_in_memory , sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
"""simple docstring"""
__lowerCamelCase : List[Any] = tmp_path / """cache"""
__lowerCamelCase : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCamelCase : Union[str, Any] = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=a__ , keep_in_memory=a__ ).read()
_check_sql_dataset(a__ , a__ )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_sql_features ( features , sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
"""simple docstring"""
__lowerCamelCase : Optional[int] = tmp_path / """cache"""
__lowerCamelCase : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__lowerCamelCase : Union[str, Any] = features.copy() if features else default_expected_features
__lowerCamelCase : List[str] = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCamelCase : Optional[int] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=a__ , cache_dir=a__ ).read()
_check_sql_dataset(a__ , a__ )
def iter_sql_file ( sqlite_path ):
    """simple docstring"""
    with contextlib.closing(sqlite3.connect(sqlite_path ) ) as con:
        cur = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def test_dataset_to_sql ( sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
"""simple docstring"""
__lowerCamelCase : List[str] = tmp_path / """cache"""
__lowerCamelCase : List[str] = os.path.join(a__ , """tmp.sql""" )
__lowerCamelCase : List[Any] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=a__ ).read()
SqlDatasetWriter(a__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
__lowerCamelCase : List[str] = iter_sql_file(a__ )
__lowerCamelCase : Optional[Any] = iter_sql_file(a__ )
for rowa, rowa in zip(a__ , a__ ):
assert rowa == rowa
@require_sqlalchemy
def test_dataset_to_sql_multiproc ( sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
"""simple docstring"""
__lowerCamelCase : int = tmp_path / """cache"""
__lowerCamelCase : Tuple = os.path.join(a__ , """tmp.sql""" )
__lowerCamelCase : Union[str, Any] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=a__ ).read()
SqlDatasetWriter(a__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
__lowerCamelCase : int = iter_sql_file(a__ )
__lowerCamelCase : str = iter_sql_file(a__ )
for rowa, rowa in zip(a__ , a__ ):
assert rowa == rowa
@require_sqlalchemy
def test_dataset_to_sql_invalidproc ( sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
"""simple docstring"""
__lowerCamelCase : Optional[int] = tmp_path / """cache"""
__lowerCamelCase : str = os.path.join(a__ , """tmp.sql""" )
__lowerCamelCase : int = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=a__ ).read()
with pytest.raises(a__ ):
SqlDatasetWriter(a__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
| 519 |
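# Round-trip sketch of the reader/writer exercised above; requires sqlalchemy (as
# the @require_sqlalchemy markers indicate) and only writes a local sqlite file.
from datasets import Dataset
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

ds = Dataset.from_dict({"col_1": ["0", "1"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
SqlDatasetWriter(ds, "dataset", "sqlite:///roundtrip.db", num_proc=1).write()
reloaded = SqlDatasetReader("dataset", "sqlite:///roundtrip.db").read()
print(reloaded.column_names)  # ['col_1', 'col_2', 'col_3']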
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 691 | 0 |
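# Sketch of what the _LazyModule indirection buys: the tokenizer submodule is only
# imported on first attribute access. The package path below is an assumption
# inferred from the four-dot relative import above (transformers/models/deprecated/tapex).
import sys
import transformers.models.deprecated.tapex as tapex

assert "transformers.models.deprecated.tapex.tokenization_tapex" not in sys.modules
_ = tapex.TapexTokenizer  # this attribute access triggers the real import
assert "transformers.models.deprecated.tapex.tokenization_tapex" in sys.modules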
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys ( state_dict ):
"""simple docstring"""
__a = {}
state_dict.pop("pixel_mean" , a__ )
state_dict.pop("pixel_std" , a__ )
__a = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__a = key.replace(a__ , a__ )
if re.match(a__ , a__ ):
__a = int(re.match(a__ , a__ ).group(2 ) )
if layer_nb == 0:
__a = key.replace("layers.0" , "proj_in" )
elif layer_nb == 1:
__a = key.replace("layers.1" , "layers.0" )
elif layer_nb == 2:
__a = key.replace("layers.2" , "proj_out" )
__a = value
__a = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def convert_sam_checkpoint ( model_name , pytorch_dump_folder , push_to_hub , model_hub_id="ybelkada/segment-anything" ):
    """simple docstring"""
    __a = hf_hub_download(model_hub_id , f"""checkpoints/{model_name}.pth""" )
if "sam_vit_b" in model_name:
__a = SamConfig()
elif "sam_vit_l" in model_name:
__a = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
__a = SamConfig(
vision_config=a__ , )
elif "sam_vit_h" in model_name:
__a = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
__a = SamConfig(
vision_config=a__ , )
__a = torch.load(a__ , map_location="cpu" )
__a = replace_keys(a__ )
__a = SamImageProcessor()
__a = SamProcessor(image_processor=a__ )
__a = SamModel(a__ )
hf_model.load_state_dict(a__ )
__a = hf_model.to("cuda" )
__a = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
__a = Image.open(requests.get(a__ , stream=a__ ).raw ).convert("RGB" )
__a = [[[400, 650]]]
__a = [[1]]
__a = processor(images=np.array(a__ ) , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
__a = hf_model(**a__ )
__a = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
__a = processor(
images=np.array(a__ ) , input_points=a__ , input_labels=a__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
__a = hf_model(**a__ )
__a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
__a = ((75, 275, 1725, 850),)
__a = processor(images=np.array(a__ ) , input_boxes=a__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
__a = hf_model(**a__ )
__a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
__a = [[[400, 650], [800, 650]]]
__a = [[1, 1]]
__a = processor(
images=np.array(a__ ) , input_points=a__ , input_labels=a__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
__a = hf_model(**a__ )
__a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
parser = argparse.ArgumentParser()
choices = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 197 |
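# Post-conversion sanity-check sketch, assuming the converted weights were saved or
# pushed under "facebook/sam-vit-base"; it mirrors the point-prompt check above.
import torch
from PIL import Image
from transformers import SamModel, SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model = SamModel.from_pretrained("facebook/sam-vit-base")
image = Image.new("RGB", (1024, 1024))  # stand-in; use the car image to reproduce the exact scores
inputs = processor(images=image, input_points=[[[400, 650]]], return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.iou_scores.shape)  # one IoU estimate per predicted mask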
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration( func):
    def wrapper(*args ,**kwargs):
        starttime =timeit.default_timer()
        _ =func(*args ,**kwargs)
        delta =timeit.default_timer() - starttime
        return delta
    wrapper.__name__ =func.__name__
    return wrapper
def generate_examples( features ,num_examples=100 ,seq_shapes=None):
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =seq_shapes or {}
for i in range(a__):
_SCREAMING_SNAKE_CASE ={}
for col_id, (k, v) in enumerate(features.items()):
if isinstance(a__ ,_ArrayXD):
_SCREAMING_SNAKE_CASE =np.random.rand(*v.shape).astype(v.dtype)
elif isinstance(a__ ,datasets.Value):
if v.dtype == "string":
_SCREAMING_SNAKE_CASE ='''The small grey turtle was surprisingly fast when challenged.'''
else:
_SCREAMING_SNAKE_CASE =np.random.randint(10 ,size=1).astype(v.dtype).item()
elif isinstance(a__ ,datasets.Sequence):
while isinstance(a__ ,datasets.Sequence):
_SCREAMING_SNAKE_CASE =v.feature
_SCREAMING_SNAKE_CASE =seq_shapes[k]
_SCREAMING_SNAKE_CASE =np.random.rand(*a__).astype(v.dtype)
_SCREAMING_SNAKE_CASE =data
dummy_data.append((i, example))
return dummy_data
def generate_example_dataset( dataset_path ,features ,num_examples=100 ,seq_shapes=None):
_SCREAMING_SNAKE_CASE =generate_examples(a__ ,num_examples=a__ ,seq_shapes=a__)
with ArrowWriter(features=a__ ,path=a__) as writer:
for key, record in dummy_data:
_SCREAMING_SNAKE_CASE =features.encode_example(a__)
writer.write(a__)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
_SCREAMING_SNAKE_CASE =datasets.Dataset.from_file(filename=a__ ,info=datasets.DatasetInfo(features=a__))
return dataset | 691 | 0 |
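# Quick check of the get_duration wrapper above: it returns the elapsed wall-clock
# time of the wrapped call, in seconds, instead of the call's own result.
@get_duration
def busy_sum(n):
    return sum(range(n))

elapsed = busy_sum(100_000)
print(f"busy_sum ran in {elapsed:.6f}s")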
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {'''facebook/bart-base''': BartForConditionalGeneration}
tokenizer_dict = {'''facebook/bart-base''': BartTokenizer}
def parse_args ( ) -> List[Any]:
a = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' , type=a__ , default=a__ , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=a__ , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=a__ , default=a__ , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=a__ , )
parser.add_argument(
'''--config_name''' , type=a__ , default=a__ , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=a__ , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=a__ , default=a__ , help='''Where to store the final ONNX file.''' )
a = parser.parse_args()
return args
def load_model_tokenizer ( model_name :Any , device :Optional[Any]="cpu" ) -> Any:
a = model_dict[model_name].from_pretrained(a__ ).to(a__ )
a = tokenizer_dict[model_name].from_pretrained(a__ )
if model_name in ["facebook/bart-base"]:
a = 0
a = None
a = 0
return huggingface_model, tokenizer
def export_and_validate_model ( model , tokenizer , onnx_file_path , num_beams , max_length ) -> None:
model.eval()
a = None
a = torch.jit.script(BARTBeamSearchGenerator(a__ ) )
with torch.no_grad():
a = '''My friends are cool but they eat too many carbs.'''
a = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='''pt''' ).to(model.device )
a = model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=a__ , max_length=a__ , early_stopping=a__ , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
a__ , (
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , a__ , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
} , example_outputs=a__ , )
logger.info('''Model exported to {}'''.format(a__ ) )
a = remove_dup_initializers(os.path.abspath(a__ ) )
logger.info('''Deduplicated and optimized model written to {}'''.format(a__ ) )
a = onnxruntime.InferenceSession(a__ )
a = ort_sess.run(
a__ , {
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(a__ ),
'''max_length''': np.array(a__ ),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
logger.info('''Success.''' )
def main ( ) -> Any:
a = parse_args()
a = 5
a = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
a = torch.device(args.device )
a , a = load_model_tokenizer(args.model_name_or_path , a__ )
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
model.to(a__ )
if args.max_length:
a = args.max_length
if args.num_beams:
a = args.num_beams
if args.output_file_path:
a = args.output_file_path
else:
a = '''BART.onnx'''
logger.info('''Exporting model to ONNX''' )
export_and_validate_model(a__ , a__ , a__ , a__ , a__ )
if __name__ == "__main__":
main()
| 117 |
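# Follow-up sketch: reload the exported graph with onnxruntime, assuming main() ran
# with its defaults so "BART.onnx" exists; 2 is bart-base's decoder_start_token_id.
import numpy as np
import onnxruntime
from transformers import BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
encoded = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="np")
session = onnxruntime.InferenceSession("BART.onnx")
output_ids = session.run(
    None,
    {
        "input_ids": encoded["input_ids"],
        "attention_mask": encoded["attention_mask"],
        "num_beams": np.array(4),
        "max_length": np.array(5),
        "decoder_start_token_id": np.array(2),
    },
)[0]
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))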
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class BertEncoderWithPabee ( BertEncoder ):
    def adaptive_forward ( self : Optional[int] , hidden_states : Union[str, Any] , current_layer : List[str] , attention_mask : List[Any]=None , head_mask : Optional[Any]=None ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.layer[current_layer](_a , _a , head_mask[current_layer] )
_SCREAMING_SNAKE_CASE =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : List[str] , _a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =BertEncoderWithPabee(_a )
self.init_weights()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def set_regression_threshold ( self : List[str] , threshold : Optional[int] ) -> List[str]:
    """simple docstring"""
    self.regression_threshold =threshold
def set_patience ( self : Dict , patience : int ) -> Tuple:
    """simple docstring"""
    self.patience =patience
def reset_stats ( self : Optional[Any] ) -> int:
    """simple docstring"""
    self.inference_instances_num =0
    self.inference_layers_num =0
def log_stats ( self : Union[str, Any] ) -> Optional[int]:
    """simple docstring"""
    avg_inf_layers =self.inference_layers_num / self.inference_instances_num
    message =(
        f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
        f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
    )
    print(message )
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def forward ( self : List[Any] , input_ids : Optional[Any]=None , attention_mask : Optional[int]=None , token_type_ids : Any=None , position_ids : Union[str, Any]=None , head_mask : Union[str, Any]=None , inputs_embeds : Union[str, Any]=None , encoder_hidden_states : str=None , encoder_attention_mask : Any=None , output_dropout : str=None , output_layers : Optional[Any]=None , regression : Dict=False , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
_SCREAMING_SNAKE_CASE =input_ids.size()
elif inputs_embeds is not None:
_SCREAMING_SNAKE_CASE =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
_SCREAMING_SNAKE_CASE =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
if token_type_ids is None:
_SCREAMING_SNAKE_CASE =torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_SCREAMING_SNAKE_CASE =self.get_extended_attention_mask(_a , _a , _a )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =encoder_hidden_states.size()
_SCREAMING_SNAKE_CASE =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
_SCREAMING_SNAKE_CASE =self.invert_attention_mask(_a )
else:
_SCREAMING_SNAKE_CASE =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_SCREAMING_SNAKE_CASE =self.get_head_mask(_a , self.config.num_hidden_layers )
_SCREAMING_SNAKE_CASE =self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
_SCREAMING_SNAKE_CASE =embedding_output
if self.training:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
_SCREAMING_SNAKE_CASE =self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_SCREAMING_SNAKE_CASE =self.pooler(encoder_outputs[0] )
_SCREAMING_SNAKE_CASE =[output_layers[self.config.num_hidden_layers - 1](_a )]
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](_a )
if regression:
_SCREAMING_SNAKE_CASE =logits.detach()
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
else:
_SCREAMING_SNAKE_CASE =logits.detach().argmax(dim=1 )
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =logits
if patient_counter == self.patience:
break
_SCREAMING_SNAKE_CASE =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : Optional[int] , _a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =config.num_labels
_SCREAMING_SNAKE_CASE =BertModelWithPabee(_a )
_SCREAMING_SNAKE_CASE =nn.Dropout(config.hidden_dropout_prob )
_SCREAMING_SNAKE_CASE =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def forward ( self : List[str] , input_ids : Optional[Any]=None , attention_mask : List[Any]=None , token_type_ids : Union[str, Any]=None , position_ids : List[str]=None , head_mask : Dict=None , inputs_embeds : Optional[Any]=None , labels : Optional[Any]=None , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.bert(
input_ids=_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_SCREAMING_SNAKE_CASE =(logits[-1],)
if labels is not None:
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
_SCREAMING_SNAKE_CASE =MSELoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE =CrossEntropyLoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_SCREAMING_SNAKE_CASE =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_SCREAMING_SNAKE_CASE =(total_loss / total_weights,) + outputs
return outputs | 691 | 0 |
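# Hedged sketch of driving the early-exit knobs above; the checkpoint path is
# hypothetical, and real use would fine-tune the model first (as in the PABEE example).
model = BertForSequenceClassificationWithPabee.from_pretrained("path/to/finetuned-pabee-checkpoint")
model.bert.set_regression_threshold(0.1)  # only consulted for regression (num_labels == 1)
model.bert.set_patience(3)  # exit once 3 consecutive layers agree on the prediction
model.bert.reset_stats()
# ... run evaluation batches through model(...) here ...
model.bert.log_stats()  # prints the average layers used and the implied speed-up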
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list ( shape , scale=1.0 , rng=None , name=None ) -> Any:
'''simple docstring'''
if rng is None:
__snake_case = global_rng
__snake_case = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class ASTFeatureExtractionTester( unittest.TestCase ):
    def __init__( self : str , parent : List[str] , batch_size : List[Any]=7 , min_seq_length : str=4_0_0 , max_seq_length : Union[str, Any]=2_0_0_0 , feature_size : Union[str, Any]=1 , padding_value : int=0.0 , sampling_rate : Any=1_6_0_0_0 , return_attention_mask : Tuple=True , do_normalize : str=True , ) -> str:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = min_seq_length
__snake_case = max_seq_length
__snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__snake_case = feature_size
__snake_case = padding_value
__snake_case = sampling_rate
__snake_case = return_attention_mask
__snake_case = do_normalize
def prepare_feat_extract_dict ( self : Tuple ) -> Tuple:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def prepare_inputs_for_common ( self : str , equal_length : Dict=False , numpify : Optional[Any]=False ) -> Tuple:
'''simple docstring'''
def _flatten(SCREAMING_SNAKE_CASE : Dict ):
return list(itertools.chain(*_a ) )
if equal_length:
__snake_case = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__snake_case = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__snake_case = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = ASTFeatureExtractor
def setUp ( self : Optional[Any] ) -> str:
    '''simple docstring'''
    self.feat_extract_tester = ASTFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
__snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__snake_case = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__snake_case = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
__snake_case = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
__snake_case = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
__snake_case = feat_extract(_a , padding=_a , return_tensors="np" ).input_values
__snake_case = feat_extract(_a , padding=_a , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__snake_case = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
__snake_case = np.asarray(_a )
__snake_case = feat_extract(_a , return_tensors="np" ).input_values
__snake_case = feat_extract(_a , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
import torch
__snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__snake_case = np.random.rand(1_0_0 ).astype(np.float32 )
__snake_case = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__snake_case = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.float32 )
__snake_case = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def _load_datasamples ( self : List[str] , num_samples : Union[str, Any] ) -> Optional[int]:
    '''simple docstring'''
    from datasets import load_dataset
    __snake_case = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
    # automatic decoding with librispeech
    __snake_case = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> int:
'''simple docstring'''
# fmt: off
__snake_case = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
__snake_case = self._load_datasamples(1 )
__snake_case = ASTFeatureExtractor()
__snake_case = feature_extractor(_a , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _a , atol=1e-4 ) )
| 371 |
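# Minimal usage sketch matching the integration test above: each clip becomes a
# 1024-frame x 128-bin log-mel spectrogram; silence stands in for real audio here.
import numpy as np
from transformers import ASTFeatureExtractor

feature_extractor = ASTFeatureExtractor()
waveform = np.zeros(16_000, dtype=np.float32)  # 1 second of silence at 16 kHz
inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")
print(inputs.input_values.shape)  # torch.Size([1, 1024, 128])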
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_table_transformer'''] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
snake_case_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 691 | 0 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays ( numsa : list[float] , numsb : list[float] ) -> float:
    all_numbers : list[float] =sorted(numsa + numsb )
    div , mod =divmod(len(all_numbers ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase_ = [float(x) for x in input("""Enter the elements of first array: """).split()]
UpperCamelCase_ = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 92 |
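# Non-interactive check of median_of_two_arrays as defined above; it exercises both
# the odd and the even total-length branches.
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0  # odd count -> middle element
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5  # even count -> mean of middle two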
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def setUp ( self : str ) -> List[str]:
    """simple docstring"""
    _SCREAMING_SNAKE_CASE =(3, 32, 128)
    self.tmpdirname =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
self.vocab_file =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
_SCREAMING_SNAKE_CASE ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
self.image_processor_file =os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def get_tokenizer ( self : Optional[Any] , **kwargs : str ) -> int:
    """simple docstring"""
    return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_image_processor ( self : Optional[int] , **kwargs : Tuple ) -> List[Any]:
    """simple docstring"""
    return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
def tearDown ( self : Tuple ) -> str:
    """simple docstring"""
    shutil.rmtree(self.tmpdirname )
def prepare_image_inputs ( self : List[Any] ) -> Any:
    """simple docstring"""
    _SCREAMING_SNAKE_CASE =np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )
_SCREAMING_SNAKE_CASE =Image.fromarray(np.moveaxis(_a , 0 , -1 ) )
return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]
        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]) | 691 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts, but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # mask out padding positions so the model ignores them
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Return the best answer spans across passages, ordered by descending relevance."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Score every candidate span and keep the top non-overlapping ones."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            # skip any candidate that overlaps an already-chosen span
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
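# Illustration of the span selection above (hypothetical logits): with
# start_logits=[0.1, 2.0, 0.3], end_logits=[0.2, 1.5, 2.5] and
# max_answer_length=2, candidates are scored as start+end sums, e.g.
# span (1, 2) scores 2.0 + 2.5 = 4.5 and wins; any later candidate that
# overlaps (1, 2) is skipped by the interval check.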
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
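# A minimal usage sketch (assumes the pretrained checkpoint above is
# reachable; shown as comments because this module is import-only):
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions=["What does DPR stand for?"],
#       titles=["Dense Passage Retrieval"],
#       texts=["Dense Passage Retrieval (DPR) is a method for open-domain QA."],
#       return_tensors="pt",
#   )
#   # encoded["input_ids"] has shape (n_passages, sequence_length); after a
#   # DPRReader forward pass, decode_best_spans() ranks the answer spans.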
| 527 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the headline COVID-19 counters from worldometers.info."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
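# The scraper pairs headline elements with counter elements positionally, so
# the result looks roughly like (hypothetical numbers):
#   {"Coronavirus Cases:": "704,753,890", "Deaths:": "7,010,681", ...}
# The exact keys depend on the live page markup and may change over time.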
if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n") | 691 | 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch tensor batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
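# Example round-trip (a sketch; assumes torch is installed, which this module
# does not itself import):
#   import torch
#   fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1  # values in [-1, 1]
#   pil_images = pt_to_pil(fake_batch)             # -> list of 2 PIL images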
| 243 |
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
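# Worked examples (values verifiable by hand):
#   set_bit(0b1101, 1)    -> 0b1111 == 15
#   clear_bit(0b1111, 1)  -> 0b1101 == 13
#   flip_bit(0b1101, 2)   -> 0b1001 == 9
#   is_bit_set(0b1010, 3) -> True
#   get_bit(0b1010, 1)    -> 1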
if __name__ == "__main__":
import doctest
doctest.testmod() | 691 | 0 |