import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch,
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
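
# --- Illustrative usage (not part of the original test module) ---
# A minimal, hedged sketch of running one of these FSMT checkpoints outside
# the test harness; the checkpoint name and sentence are examples only.
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

batch = tokenizer(["Machine learning is great!"], return_tensors="pt")
generated = model.generate(input_ids=batch.input_ids, num_beams=5)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))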
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the pre-tokenizer if the stored add_prefix_space flag disagrees
        # with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenates each conversation turn followed by the EOS token, then truncates from the left."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
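
# --- Illustrative usage (not part of the original module) ---
# Hedged sketch; assumes network access to the EleutherAI/gpt-neox-20b
# tokenizer files on the Hugging Face Hub.
from transformers import GPTNeoXTokenizerFast

neox_tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
encoded = neox_tokenizer("Hello world", return_tensors="pt")
print(encoded.input_ids)
print(neox_tokenizer.decode(encoded.input_ids[0]))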
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """Output of the scheduler's step: the denoised previous sample and,
    optionally, the predicted clean sample x_0."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Creates a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # No input scaling is needed for this scheduler.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                # _get_variance already returned the standard deviation in this mode
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
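
# --- Illustrative usage (not part of the original module) ---
# A hedged toy denoising loop against the public diffusers API; the "model"
# here is random noise standing in for a real UnCLIP denoiser, so the output
# is meaningless and only demonstrates the scheduler call sequence.
import torch

from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler()
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 64, 64)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for a real denoiser
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)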
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
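
# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of the same Whisper pipeline without the tool wrapper; the
# audio here is one second of silence at 16 kHz, purely for illustration.
import numpy as np

from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

audio = np.zeros(16_000, dtype=np.float32)  # replace with a real waveform
inputs = processor(audio, sampling_rate=16_000, return_tensors="pt")
predicted_ids = whisper_model.generate(inputs.input_features)
print(processor.batch_decode(predicted_ids, skip_special_tokens=True)[0])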
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
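
# --- Illustrative usage (not part of the original module) ---
# Hedged sketch of the byte-level tokenizer this module exposes; the exact
# ids in the comment assume ByT5's usual 3-id offset for its special tokens.
from transformers import ByT5Tokenizer

byt5_tokenizer = ByT5Tokenizer()  # byte-level: no vocabulary files needed
ids = byt5_tokenizer("hi").input_ids
print(ids)  # expected roughly [107, 108, 1]: UTF-8 bytes + offset, then </s>
print(byt5_tokenizer.decode(ids, skip_special_tokens=True))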
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51_865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1_536, encoder_ffn_dim=1_536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50_257,
        use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256,
        dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
        scale_embedding=False, max_source_positions=1_500, max_target_positions=448,
        pad_token_id=50_256, bos_token_id=50_256, eos_token_id=50_256,
        suppress_tokens=None, begin_suppress_tokens=[220, 50_256],
        use_weighted_layer_sum=False, classifier_proj_size=256,
        apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        median_filter_width=7, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22_050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )

        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
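
# --- Illustrative usage (not part of the original module) ---
# A hedged example of building a deliberately tiny Whisper model from this
# config; the sizes are made up and far smaller than any released checkpoint.
from transformers import WhisperConfig, WhisperForConditionalGeneration

tiny_config = WhisperConfig(
    d_model=64,
    encoder_layers=2,
    decoder_layers=2,
    encoder_attention_heads=2,
    decoder_attention_heads=2,
    encoder_ffn_dim=128,
    decoder_ffn_dim=128,
)
tiny_model = WhisperForConditionalGeneration(tiny_config)
print(sum(p.numel() for p in tiny_model.parameters()), "parameters")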
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    # Build the example tree:
    #       1
    #      / \
    #     2   3
    #    / \
    #   4   5
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    root.left.left = Node(4)
    root.left.right = Node(5)
    return root


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
UpperCAmelCase : Any = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
UpperCAmelCase : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
UpperCAmelCase : Dict = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase : Dict = torch.cuda.device_count()
UpperCAmelCase : List[Any] = num_gpus
UpperCAmelCase : List[Any] = False
if num_gpus > 1:
UpperCAmelCase : Tuple = 'MULTI_GPU'
else:
UpperCAmelCase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
UpperCAmelCase : Optional[int] = torch.xpu.device_count()
UpperCAmelCase : Optional[int] = num_xpus
UpperCAmelCase : Any = False
if num_xpus > 1:
UpperCAmelCase : Tuple = 'MULTI_XPU'
else:
UpperCAmelCase : str = 'NO'
elif is_npu_available():
UpperCAmelCase : Optional[int] = torch.npu.device_count()
UpperCAmelCase : str = num_npus
UpperCAmelCase : int = False
if num_npus > 1:
UpperCAmelCase : int = 'MULTI_NPU'
else:
UpperCAmelCase : List[str] = 'NO'
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = True
UpperCAmelCase : str = 1
UpperCAmelCase : str = 'NO'
UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
import base64


def base64_encode(string: str) -> bytes:
    """Encodes the given string to bytes using the standard Base64 alphabet."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded: bytes) -> str:
    """Decodes Base64-encoded bytes back to the original string."""
    return base64.b64decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)

    decoded = base64_decode(encoded)
    print(decoded)
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness


_CITATION = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
_DESCRIPTION = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
_KWARGS_DESCRIPTION = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
_WARNING = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
_LICENSE = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
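
# --- Worked example (not part of the original module; numbers chosen by hand) ---
# With n=5 samples per task and c=2 passing, the unbiased estimator gives
# pass@1 = c/n = 0.4 and pass@5 = 1.0 (some sample among the 5 must pass).
print(estimate_pass_at_k(np.array([5]), np.array([2]), 1))  # -> [0.4]
print(estimate_pass_at_k(np.array([5]), np.array([2]), 5))  # -> [1.]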
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union

import tqdm
from filelock import FileLock

from transformers import (
    BartTokenizer,
    BartTokenizerFast,
    DataProcessor,
    PreTrainedTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLMRobertaTokenizer,
    is_tf_available,
    is_torch_available,
)


logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for simple sequence classification."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        """PyTorch dataset that caches tokenized HANS features on disk."""

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        """TensorFlow dataset wrapper that yields HANS features from a generator."""

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10_000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
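
# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of HansProcessor._create_examples on hand-made rows; the
# column layout mirrors the HANS TSV files (label in column 0, sentences in
# columns 5 and 6, pairID in column 7), and every value below is made up.
_processor = HansProcessor()
_rows = [
    ["gold_label", "", "", "", "", "sentence1", "sentence2", "pairID"],  # header row is skipped
    ["entailment", "", "", "", "", "The doctor saw the lawyer.", "The lawyer was seen by the doctor.", "ex1"],
]
_examples = _processor._create_examples(_rows, "train")
print(_examples[0].guid, _examples[0].label, _examples[0].pairID)  # -> train-entailment entailment 1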
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field("How many machines do you want use? [1]: ", int, default=1)

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
def solution(n: int = 1000) -> int:
    """Project Euler 57: count, among the first `n` expansions of the square
    root of two, those whose numerator has more digits than the denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"{solution() = }")
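
# --- Sanity check (not part of the original module; values computed by hand) ---
# The first expansion whose numerator gains a digit is the eighth, 1393/985.
assert solution(7) == 0
assert solution(8) == 1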
def nand_gate(input_1: int, input_2: int) -> int:
    """Logical NAND: returns 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
def solution(power: int = 1000) -> int:
    """Project Euler 16: return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
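
# --- Equivalent cross-check (my addition, not part of the original module) ---
def solution_str(power: int = 1000) -> int:
    """Same digit sum, computed over the string form of 2**power."""
    return sum(int(digit) for digit in str(2**power))


assert solution_str(15) == 26  # 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26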
from math import factorial


def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = """blip_2_vision_model"""
    def __init__( self , hidden_size=1_408 , intermediate_size=6_144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=0.0_0001 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class BlipaQFormerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """blip_2_qformer"""
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_408 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class BlipaConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """blip-2"""
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ) -> None:
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config: BlipaVisionConfig , qformer_config: BlipaQFormerConfig , text_config: PretrainedConfig , **kwargs , ):
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
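# Minimal usage sketch (hypothetical, relying only on the defaults defined above):
# config = BlipaConfig()
# assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size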
| 695 | 1 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_lock_timeout( tmpdir ):
    # Two locks on the same file: the second acquire must time out while the first is held.
    locka = FileLock(str(tmpdir / 'foo.lock' ) )
    lockb = FileLock(str(tmpdir / 'foo.lock' ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_lock_filename( tmpdir ):
    # FileLock shortens over-long paths so the lock file name stays within OS limits.
    filename = 'a' * 10_00 + '.lock'
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('.lock' )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 2_55
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
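# Both tests lean on pytest's built-in `tmpdir` fixture. Outside of pytest, the same
# pattern looks roughly like this (a hedged sketch, not part of the test module):
# lock = FileLock("/tmp/demo.lock")
# with lock.acquire(timeout=1):
#     ...  # critical section; a concurrent acquire on the same path raises Timeout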
| 695 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def get_tokenizer( self , mname ):
        return FSMTTokenizer.from_pretrained(mname )
    def get_model( self , mname ):
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score ):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']
        batch = tokenizer(src_sentences , return_tensors='pt' , truncation=True , padding='longest' ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores['bleu'] , min_bleu_score )
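# These integration tests are gated behind @slow; in the transformers test suite they
# are typically enabled via the RUN_SLOW environment variable, e.g. (hedged sketch):
# RUN_SLOW=1 pytest <path-to-this-test-file> -k bleu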
| 695 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase__ = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def get_new_h_w( h , w , scale_factor=8 ):
    # Round height/width up to the nearest multiple of scale_factor**2,
    # then express them in units of scale_factor (the latent resolution).
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
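# Example: with a scale_factor of 8, spatial sizes are rounded up to a multiple of 64
# and then divided by 8, so get_new_h_w(768, 768) == (96, 96) - the latent resolution
# the pipeline below feeds to the UNet.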
class KandinskyPipeline( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , text_encoder: MultilingualCLIP , tokenizer: XLMRobertaTokenizer , unet: UNetaDConditionModel , scheduler: Union[DDIMScheduler, DDPMScheduler] , movq: VQModel , ) -> None:
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any]=None , ) -> List[Any]:
UpperCAmelCase : int = len(lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else 1
# get prompt text embeddings
UpperCAmelCase : int = self.tokenizer(
lowercase_ , padding='max_length' , truncation=lowercase_ , max_length=77 , return_attention_mask=lowercase_ , add_special_tokens=lowercase_ , return_tensors='pt' , )
UpperCAmelCase : List[str] = text_inputs.input_ids
UpperCAmelCase : List[Any] = self.tokenizer(lowercase_ , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowercase_ , lowercase_ ):
UpperCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
UpperCAmelCase : List[Any] = text_input_ids.to(lowercase_ )
UpperCAmelCase : List[Any] = text_inputs.attention_mask.to(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = self.text_encoder(
input_ids=lowercase_ , attention_mask=lowercase_ )
UpperCAmelCase : Tuple = prompt_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase : Optional[Any] = text_encoder_hidden_states.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase : Dict = text_mask.repeat_interleave(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase : List[str]
if negative_prompt is None:
UpperCAmelCase : List[str] = [''] * batch_size
elif type(lowercase_ ) is not type(lowercase_ ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_ )} !="""
f""" {type(lowercase_ )}.""" )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(lowercase_ ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(lowercase_ )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
' the batch size of `prompt`.' )
else:
UpperCAmelCase : str = negative_prompt
UpperCAmelCase : List[str] = self.tokenizer(
lowercase_ , padding='max_length' , max_length=77 , truncation=lowercase_ , return_attention_mask=lowercase_ , add_special_tokens=lowercase_ , return_tensors='pt' , )
UpperCAmelCase : Union[str, Any] = uncond_input.input_ids.to(lowercase_ )
UpperCAmelCase : Optional[Any] = uncond_input.attention_mask.to(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.text_encoder(
input_ids=lowercase_ , attention_mask=lowercase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase : Optional[int] = negative_prompt_embeds.shape[1]
UpperCAmelCase : Union[str, Any] = negative_prompt_embeds.repeat(1 , lowercase_ )
UpperCAmelCase : int = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowercase_ )
UpperCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.shape[1]
UpperCAmelCase : Optional[Any] = uncond_text_encoder_hidden_states.repeat(1 , lowercase_ , 1 )
UpperCAmelCase : Any = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , lowercase_ , -1 )
UpperCAmelCase : Dict = uncond_text_mask.repeat_interleave(lowercase_ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase : Optional[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
UpperCAmelCase : Dict = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
UpperCAmelCase : Optional[int] = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def UpperCAmelCase_ ( self : Dict , lowercase_ : int=0 ) -> Optional[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase : List[str] = torch.device(f"""cuda:{gpu_id}""" )
UpperCAmelCase : Tuple = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Optional[Any]=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase : int = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase : Tuple = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
UpperCAmelCase , UpperCAmelCase : List[Any] = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
if self.safety_checker is not None:
UpperCAmelCase , UpperCAmelCase : List[Any] = cpu_offload_with_hook(self.safety_checker , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
UpperCAmelCase : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self : Dict , lowercase_ : Union[str, List[str]] , lowercase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ : Optional[Union[str, List[str]]] = None , lowercase_ : int = 512 , lowercase_ : int = 512 , lowercase_ : int = 100 , lowercase_ : float = 4.0 , lowercase_ : int = 1 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ) -> Any:
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Any = 1
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Optional[int] = len(lowercase_ )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(lowercase_ )}""" )
UpperCAmelCase : Any = self._execution_device
UpperCAmelCase : Dict = batch_size * num_images_per_prompt
UpperCAmelCase : Optional[Any] = guidance_scale > 1.0
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = self._encode_prompt(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : List[Any] = torch.cat(lowercase_ , dim=0 )
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Tuple = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase : Any = image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase : Tuple = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=lowercase_ )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
UpperCAmelCase : List[str] = self.scheduler.timesteps
UpperCAmelCase : str = self.unet.config.in_channels
UpperCAmelCase , UpperCAmelCase : Optional[int] = get_new_h_w(lowercase_ , lowercase_ , self.movq_scale_factor )
# create initial latent
UpperCAmelCase : Any = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase : Union[str, Any] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
UpperCAmelCase : str = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase , UpperCAmelCase : List[Any] = noise_pred.chunk(2 )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = variance_pred.chunk(2 )
UpperCAmelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase : Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase , UpperCAmelCase : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase : Tuple = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , ).prev_sample
# post-processing
UpperCAmelCase : Union[str, Any] = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase : Tuple = image * 0.5 + 0.5
UpperCAmelCase : Dict = image.clamp(0 , 1 )
UpperCAmelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase : Optional[int] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class PixaStructTextConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """pix2struct_text_model"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """hidden_size""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , vocab_size=50_244 , hidden_size=768 , d_kv=64 , d_ff=2_048 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """pix2struct_vision_model"""
    def __init__( self , hidden_size=768 , patch_embed_hidden_size=768 , d_ff=2_048 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1E-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1E-10 , initializer_factor=1.0 , seq_len=4_096 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """pix2struct"""
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ) -> None:
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
        self.text_config = PixaStructTextConfig(**text_config )
        self.vision_config = PixaStructVisionConfig(**vision_config )
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs( cls , text_config: PixaStructTextConfig , vision_config: PixaStructVisionConfig , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
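# Minimal usage sketch (hypothetical, using only the defaults defined above):
# config = PixaStructConfig()
# config.to_dict()['text_config']['hidden_size']  # -> 768, kept in sync by to_dict()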
| 695 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = """openai/whisper-base"""
    description = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    name = """transcriber"""
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["""audio"""]
    outputs = ["""text"""]
    def encode( self , audio ):
        return self.pre_processor(audio , return_tensors='pt' ).input_features
    def forward( self , inputs ):
        return self.model.generate(inputs=inputs )
    def decode( self , outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
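# Hedged usage sketch (assumes the agents runtime that PipelineTool plugs into;
# `waveform` is a hypothetical raw audio array):
# tool = SpeechToTextTool()
# tool.setup()
# transcript = tool(waveform)  # encode -> generate -> batch_decode, as defined above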
| 695 |
'''simple docstring'''
import baseaa
def baseaa_encode( string ):
    return baseaa.baaencode(string.encode('utf-8' ) )
def baseaa_decode( encoded ):
    return baseaa.baadecode(encoded ).decode('utf-8' )
if __name__ == "__main__":
lowercase__ = "Hello World!"
lowercase__ = baseaa_encode(test)
print(encoded)
lowercase__ = baseaa_decode(encoded)
print(decoded)
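# Expected round trip for the demo above (standard Base64, shown as a hedged
# cross-check): baseaa_encode("Hello World!") == b"SGVsbG8gV29ybGQh".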
| 695 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
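# Behavioural note: with this _LazyModule pattern, importing the package is cheap;
# the heavy torch/TF submodules are only imported when an attribute such as
# TransfoXLModel is first accessed.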
| 695 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean( input_a , input_b ):
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search( dataset , value_array ):
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape' )
    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity( input_a , input_b ):
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
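# Small illustration with hypothetical data (not part of the original module):
# dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
# value_array = np.array([[0.9, 1.0]])
# similarity_search(dataset, value_array)  # -> [[[1.0, 1.0], ~0.1]] (nearest vector)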
| 695 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """openai-gpt"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=40_478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
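# Minimal usage sketch (hedged): the defaults above reproduce the original GPT
# hyper-parameters, e.g. OpenAIGPTConfig().n_embd == 768 and .n_layer == 12.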
| 695 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self ) -> None:
        super().setUp()
        vocab_tokens = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ):
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
    def test_token_type_ids( self ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            inputs = tokenizer('UNwant\u00E9d,running' )
            sentence_len = len(inputs['input_ids'] ) - 1
            self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len )
            inputs = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' )
            self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
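# Note on the assertions above: unlike BERT, Funnel tokenizers assign token type 2 to
# the leading <cls> token, hence the expected pattern [2] + [0] * sentence_len.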
| 695 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video( url ):
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url ).json()[0]['urls'][0]['src']
    return requests.get(video_url ).content
if __name__ == "__main__":
lowercase__ = input("Enter Video/IGTV url: ").strip()
lowercase__ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
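# Caveat (added note): this depends on a third-party endpoint whose JSON layout
# ([0]['urls'][0]['src']) may change without notice; treat the request above as a
# best-effort sketch rather than a stable API.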
| 695 | 1 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
lowercase__ = True
except ImportError:
lowercase__ = False
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory( args ):
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand( BaseTransformersCLICommand ):
    '''simple docstring'''
    @staticmethod
    def register_subcommand( parser: ArgumentParser ):
        add_new_model_parser = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file' , type=str , help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path' , type=str , help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self , testing: bool , testing_file: str , path=None , *args ):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ):
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCAmelCase : str = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(lowercase_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
UpperCAmelCase : List[str] = (
Path(lowercase_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCAmelCase : List[Any] = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowercase_ ) )
else:
with open(self._testing_file , 'r' ) as configuration_file:
UpperCAmelCase : Dict = json.load(lowercase_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowercase_ , extra_context=lowercase_ , )
UpperCAmelCase : str = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' , 'r' ) as configuration_file:
UpperCAmelCase : Any = json.load(lowercase_ )
UpperCAmelCase : Optional[Any] = configuration['lowercase_modelname']
UpperCAmelCase : Union[str, Any] = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(f"""{directory}/configuration.json""" )
UpperCAmelCase : Tuple = 'PyTorch' in generate_tensorflow_pytorch_and_flax
UpperCAmelCase : List[str] = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
UpperCAmelCase : List[Any] = 'Flax' in generate_tensorflow_pytorch_and_flax
UpperCAmelCase : Tuple = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(lowercase_ , exist_ok=lowercase_ )
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=lowercase_ )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , 'w' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(lowercase_ : Tuple ):
with open(lowercase_ , 'r' ) as f:
UpperCAmelCase : Any = f.readlines()
with open(lowercase_ , 'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowercase_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowercase_ : str , lowercase_ : str , lowercase_ : List[str] ):
# Create temp file
UpperCAmelCase , UpperCAmelCase : Tuple = mkstemp()
UpperCAmelCase : Dict = False
with fdopen(lowercase_ , 'w' ) as new_file:
with open(lowercase_ ) as old_file:
for line in old_file:
new_file.write(lowercase_ )
if line_to_copy_below in line:
UpperCAmelCase : Union[str, Any] = True
for line_to_copy in lines_to_copy:
new_file.write(lowercase_ )
if not line_found:
raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(lowercase_ , lowercase_ )
# Remove original file
remove(lowercase_ )
# Move new file
move(lowercase_ , lowercase_ )
def skip_units(lowercase_ : int ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowercase_ : Optional[Any] ):
with open(lowercase_ ) as datafile:
UpperCAmelCase : Dict = []
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : str = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCAmelCase : List[str] = line.split('"' )[1]
UpperCAmelCase : Tuple = skip_units(lowercase_ )
elif "# Below: " in line and "##" not in line:
UpperCAmelCase : str = line.split('"' )[1]
UpperCAmelCase : Union[str, Any] = skip_units(lowercase_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : str = []
elif "# Replace with" in line and "##" not in line:
UpperCAmelCase : Tuple = []
elif "##" not in line:
lines_to_copy.append(lowercase_ )
remove(lowercase_ )
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(lowercase_ )
| 695 |
'''simple docstring'''
def solution( max_perimeter = 10**9 ):
    # Sum the perimeters (up to max_perimeter) produced by the Pell-like recurrence
    # for almost-equilateral triangles with integral sides and area.
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
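# Cross-check sketch (hedged): the recurrence enumerates the almost-equilateral
# Heronian triangles (5,5,6), (17,17,16), ... with perimeters 16, 50, ..., so
# solution(100) == 16 + 50 == 66.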
| 695 | 1 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase__ = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro( TestCasePlus ):
    '''simple docstring'''
    def setUp( self ) -> None:
        super().setUp()
        data_cached = cached_path(
            'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=True , )
        self.data_dir = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
    def test_model_download( self ):
        MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script( self ):
        env_vars_to_replace = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
        bash_script = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
        bash_script = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
UpperCAmelCase : Union[str, Any] = f"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
UpperCAmelCase : List[Any] = ['finetune.py'] + bash_script.split() + args
with patch.object(lowercase_ , 'argv' , lowercase_ ):
UpperCAmelCase : Tuple = argparse.ArgumentParser()
UpperCAmelCase : int = pl.Trainer.add_argparse_args(lowercase_ )
UpperCAmelCase : int = SummarizationModule.add_model_specific_args(lowercase_ , os.getcwd() )
UpperCAmelCase : Any = parser.parse_args()
UpperCAmelCase : Any = main(lowercase_ )
# Check metrics
UpperCAmelCase : Union[str, Any] = load_json(model.metrics_save_path )
UpperCAmelCase : Union[str, Any] = metrics['val'][0]
UpperCAmelCase : int = metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , lowercase_ )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCAmelCase : Dict = os.listdir(lowercase_ )
UpperCAmelCase : Tuple = [x for x in contents if x.endswith('.ckpt' )][0]
UpperCAmelCase : Optional[Any] = os.path.join(args.output_dir , lowercase_ )
UpperCAmelCase : Tuple = torch.load(lowercase_ , map_location='cpu' )
UpperCAmelCase : int = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class A_ ( _snake_case ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self : str ) -> Tuple:
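        # Same flow as the finetune smoke test above, but driving
        # SummarizationDistiller through distill_main (no-teacher distillation).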
UpperCAmelCase : int = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
UpperCAmelCase : Optional[int] = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
UpperCAmelCase : str = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
UpperCAmelCase : Optional[Any] = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
UpperCAmelCase : Any = bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
UpperCAmelCase : Tuple = bash_script.replace(lowercase_ , str(lowercase_ ) )
UpperCAmelCase : Dict = self.get_auto_remove_tmp_dir()
UpperCAmelCase : int = bash_script.replace('--fp16' , '' )
UpperCAmelCase : List[Any] = 6
UpperCAmelCase : List[Any] = (
['distillation.py']
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
'--gpus=1',
'--learning_rate=1e-3',
f"""--num_train_epochs={epochs}""",
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(lowercase_ , 'argv' , lowercase_ ):
UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
UpperCAmelCase : Optional[Any] = pl.Trainer.add_argparse_args(lowercase_ )
UpperCAmelCase : Optional[Any] = SummarizationDistiller.add_model_specific_args(lowercase_ , os.getcwd() )
UpperCAmelCase : Optional[Any] = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
UpperCAmelCase : Tuple = distill_main(lowercase_ )
# Check metrics
UpperCAmelCase : int = load_json(model.metrics_save_path )
UpperCAmelCase : int = metrics['val'][0]
UpperCAmelCase : int = metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , lowercase_ )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCAmelCase : List[str] = os.listdir(lowercase_ )
UpperCAmelCase : Union[str, Any] = [x for x in contents if x.endswith('.ckpt' )][0]
UpperCAmelCase : Dict = os.path.join(args.output_dir , lowercase_ )
UpperCAmelCase : int = torch.load(lowercase_ , map_location='cpu' )
UpperCAmelCase : Optional[int] = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : str = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
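    # The fast test below wires the dummy UNet, VQ model and a DDIM scheduler
    # into an LDMPipeline and checks a 3x3 corner slice of a 64x64 sample.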
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
UpperCAmelCase : Any = self.dummy_uncond_unet
UpperCAmelCase : Tuple = DDIMScheduler()
UpperCAmelCase : Optional[Any] = self.dummy_vq_model
UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 695 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
lowercase__ = {"allegro/herbert-base-cased": 514}
lowercase__ = {}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = VOCAB_FILES_NAMES
UpperCAmelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Union[str, Any] = HerbertTokenizer
def __init__( self : Optional[Any] , lowercase_ : List[Any]=None , lowercase_ : Tuple=None , lowercase_ : Tuple=None , lowercase_ : List[str]="<s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Any="<pad>" , lowercase_ : Tuple="<mask>" , lowercase_ : Dict="</s>" , **lowercase_ : str , ) -> Dict:
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , sep_token=lowercase_ , **lowercase_ , )
def UpperCAmelCase_ ( self : int , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase : int = [self.cls_token_id]
UpperCAmelCase : Any = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase_ ( self : Any , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ) -> List[int]:
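        # Returns 1 for special token positions (<s>/</s>) and 0 for sequence tokens.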
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1]
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase : Optional[int] = [self.sep_token_id]
UpperCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : int = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
UpperCAmelCase : Dict = self.dummy_uncond_unet
UpperCAmelCase : Dict = KarrasVeScheduler()
UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 695 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : str = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
UpperCAmelCase : Any = self.dummy_uncond_unet
UpperCAmelCase : Tuple = DDIMScheduler()
UpperCAmelCase : Optional[Any] = self.dummy_vq_model
UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 695 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = """autoformer"""
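    # Maps the generic config attribute names used across the library onto the
    # Autoformer-specific hyperparameter names below.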
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Dict , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : str = "student_t" , lowercase_ : str = "nll" , lowercase_ : int = 1 , lowercase_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowercase_ : bool = True , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : Optional[List[int]] = None , lowercase_ : Optional[List[int]] = None , lowercase_ : int = 64 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 32 , lowercase_ : int = 32 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 100 , lowercase_ : float = 0.02 , lowercase_ : bool = True , lowercase_ : Union[str, Any]=True , lowercase_ : int = 10 , lowercase_ : int = 25 , lowercase_ : int = 3 , **lowercase_ : str , ) -> Dict:
# time series specific configuration
UpperCAmelCase : int = prediction_length
UpperCAmelCase : Optional[Any] = context_length if context_length is not None else prediction_length
UpperCAmelCase : List[Any] = distribution_output
UpperCAmelCase : Tuple = loss
UpperCAmelCase : Dict = input_size
UpperCAmelCase : Dict = num_time_features
UpperCAmelCase : Tuple = lags_sequence
UpperCAmelCase : str = scaling
UpperCAmelCase : Optional[int] = num_dynamic_real_features
UpperCAmelCase : List[str] = num_static_real_features
UpperCAmelCase : Optional[int] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : int = cardinality
else:
UpperCAmelCase : Union[str, Any] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : Any = embedding_dimension
else:
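            # Default heuristic: embedding size grows with the category
            # cardinality, capped at 50.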
UpperCAmelCase : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase : Dict = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase : Optional[int] = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : Dict = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_attention_heads
UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase : str = decoder_ffn_dim
UpperCAmelCase : str = encoder_layers
UpperCAmelCase : Optional[Any] = decoder_layers
UpperCAmelCase : int = dropout
UpperCAmelCase : Any = attention_dropout
UpperCAmelCase : Tuple = activation_dropout
UpperCAmelCase : str = encoder_layerdrop
UpperCAmelCase : Union[str, Any] = decoder_layerdrop
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Dict = init_std
UpperCAmelCase : Union[str, Any] = use_cache
# Autoformer
UpperCAmelCase : Any = label_length
UpperCAmelCase : List[Any] = moving_average
UpperCAmelCase : Optional[Any] = autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def UpperCAmelCase_ ( self : List[str] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 695 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = """swin2sr"""
UpperCAmelCase_ : Union[str, Any] = {
"""hidden_size""": """embed_dim""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Dict , lowercase_ : Union[str, Any]=64 , lowercase_ : Dict=1 , lowercase_ : Optional[Any]=3 , lowercase_ : List[str]=180 , lowercase_ : Any=[6, 6, 6, 6, 6, 6] , lowercase_ : str=[6, 6, 6, 6, 6, 6] , lowercase_ : Optional[Any]=8 , lowercase_ : Union[str, Any]=2.0 , lowercase_ : Optional[Any]=True , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Dict=0.0 , lowercase_ : Dict=0.1 , lowercase_ : Dict="gelu" , lowercase_ : Dict=False , lowercase_ : Any=0.02 , lowercase_ : Optional[Any]=1E-5 , lowercase_ : List[Any]=2 , lowercase_ : List[Any]=1.0 , lowercase_ : int="1conv" , lowercase_ : Optional[Any]="pixelshuffle" , **lowercase_ : Union[str, Any] , ) -> List[str]:
super().__init__(**lowercase_ )
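        # `upscale` is the super-resolution factor and `upsampler` selects the
        # reconstruction head (e.g. "pixelshuffle").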
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : Dict = patch_size
UpperCAmelCase : Optional[int] = num_channels
UpperCAmelCase : int = embed_dim
UpperCAmelCase : Optional[int] = depths
UpperCAmelCase : List[Any] = len(lowercase_ )
UpperCAmelCase : Optional[int] = num_heads
UpperCAmelCase : List[Any] = window_size
UpperCAmelCase : Tuple = mlp_ratio
UpperCAmelCase : Optional[int] = qkv_bias
UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase : Any = drop_path_rate
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : Tuple = use_absolute_embeddings
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Union[str, Any] = upscale
UpperCAmelCase : Optional[int] = img_range
UpperCAmelCase : List[str] = resi_connection
UpperCAmelCase : List[str] = upsampler
| 695 |
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    # Return every character n-gram of length `ngram_size` in `sentence`.
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
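# Example: create_ngram("abcde", 2) -> ['ab', 'bc', 'cd', 'de']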
if __name__ == "__main__":
from doctest import testmod
testmod()
| 695 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : int = PegasusTokenizer
UpperCAmelCase_ : Optional[int] = PegasusTokenizerFast
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : List[str] = True
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Optional[Any] = PegasusTokenizer(lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase_ ( self : List[str] ) -> str:
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def UpperCAmelCase_ ( self : Union[str, Any] , **lowercase_ : List[str] ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Optional[int] ) -> Tuple:
return ("This is a test", "This is a test")
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
UpperCAmelCase : List[Any] = '</s>'
UpperCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCAmelCase_ ( self : Tuple ) -> str:
UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(lowercase_ ) , 1_103 )
def UpperCAmelCase_ ( self : int ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Union[str, Any] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
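        # The slow and fast tokenizers must produce identical ids for text that
        # mixes known special tokens with unknown <unk_token_*> markers.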
UpperCAmelCase : Any = rust_tokenizer([raw_input_str] , return_tensors=lowercase_ , add_special_tokens=lowercase_ ).input_ids[0]
UpperCAmelCase : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=lowercase_ , add_special_tokens=lowercase_ ).input_ids[0]
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
UpperCAmelCase : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase : List[Any] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
UpperCAmelCase : Tuple = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
UpperCAmelCase : List[str] = tokenizer([raw_input_str] , return_tensors=lowercase_ ).input_ids[0]
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
UpperCAmelCase : int = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
UpperCAmelCase : Any = 'To ensure a smooth flow of bank resolutions.'
UpperCAmelCase : int = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
UpperCAmelCase : Tuple = tokenizer([raw_input_str] , return_tensors=lowercase_ ).input_ids[0]
self.assertListEqual(lowercase_ , lowercase_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase_ ( self : str ) -> int:
UpperCAmelCase : Optional[Any] = ['This is going to be way too long.' * 150, 'short example']
UpperCAmelCase : int = ['not super long but more than 5 tokens', 'tiny']
UpperCAmelCase : int = self._large_tokenizer(lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors='pt' )
UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=lowercase_ , max_length=5 , padding=lowercase_ , truncation=lowercase_ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(lowercase_ ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
# fmt: off
UpperCAmelCase : List[str] = {'input_ids': [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : int = PegasusTokenizer
UpperCAmelCase_ : List[str] = PegasusTokenizerFast
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Tuple = True
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = PegasusTokenizer(lowercase_ , offset=0 , mask_token_sent=lowercase_ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def UpperCAmelCase_ ( self : List[str] , **lowercase_ : Tuple ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Any ) -> str:
return ("This is a test", "This is a test")
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Tuple = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
UpperCAmelCase : List[Any] = rust_tokenizer([raw_input_str] , return_tensors=lowercase_ , add_special_tokens=lowercase_ ).input_ids[0]
UpperCAmelCase : List[str] = py_tokenizer([raw_input_str] , return_tensors=lowercase_ , add_special_tokens=lowercase_ ).input_ids[0]
self.assertListEqual(lowercase_ , lowercase_ )
@require_torch
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[Any] = ['This is going to be way too long.' * 1_000, 'short example']
UpperCAmelCase : int = ['not super long but more than 5 tokens', 'tiny']
UpperCAmelCase : Optional[int] = self._large_tokenizer(lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors='pt' )
UpperCAmelCase : List[str] = self._large_tokenizer(
text_target=lowercase_ , max_length=5 , padding=lowercase_ , truncation=lowercase_ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(lowercase_ ) == 2 # input_ids, attention_mask.
def UpperCAmelCase_ ( self : str ) -> Any:
UpperCAmelCase : Dict = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
UpperCAmelCase : int = self._large_tokenizer(lowercase_ ).input_ids
self.assertListEqual(
lowercase_ , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
| 695 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Second-order low-pass biquad (Audio EQ Cookbook coefficients).
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Second-order high-pass biquad.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Second-order band-pass biquad (constant skirt gain, peak gain = Q).
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Second-order all-pass biquad: flat magnitude, frequency-dependent phase.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    # Second-order peaking EQ biquad; gain_db boosts or cuts around `frequency`.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    # Second-order low-shelf biquad.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)

    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    # Second-order high-shelf biquad.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)

    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
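
# Minimal usage sketch. It assumes IIRFilter (from audio_filters.iir_filter)
# exposes a per-sample `process` method, as in the module imported above:
#
#     filt = make_lowpass(1_000, 48_000)
#     filtered = [filt.process(sample) for sample in samples]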
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A_ :
'''simple docstring'''
def __init__( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]=3 , lowercase_ : List[Any]=32 , lowercase_ : int=3 , lowercase_ : Tuple=10 , lowercase_ : Tuple=[10, 20, 30, 40] , lowercase_ : List[str]=[1, 1, 2, 1] , lowercase_ : Tuple=True , lowercase_ : Optional[int]=True , lowercase_ : Optional[int]="relu" , lowercase_ : Dict=3 , lowercase_ : Union[str, Any]=None , ) -> Dict:
UpperCAmelCase : str = parent
UpperCAmelCase : str = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : Union[str, Any] = num_channels
UpperCAmelCase : Optional[Any] = embeddings_size
UpperCAmelCase : List[str] = hidden_sizes
UpperCAmelCase : Union[str, Any] = depths
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : str = use_labels
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Optional[Any] = num_labels
UpperCAmelCase : List[Any] = scope
UpperCAmelCase : Dict = len(lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Any:
UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : int = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase_ ( self : str , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Union[str, Any] ) -> int:
UpperCAmelCase : Tuple = TFResNetModel(config=lowercase_ )
UpperCAmelCase : List[str] = model(lowercase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self : int , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : int ) -> str:
UpperCAmelCase : Dict = self.num_labels
UpperCAmelCase : str = TFResNetForImageClassification(lowercase_ )
UpperCAmelCase : List[Any] = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
UpperCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCAmelCase_ : Any = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase_ : int = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : int = False
UpperCAmelCase_ : Optional[Any] = False
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
UpperCAmelCase : Optional[int] = TFResNetModelTester(self )
UpperCAmelCase : List[Any] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
pass
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Union[str, Any] = model_class(lowercase_ )
UpperCAmelCase : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : List[str] = [*signature.parameters.keys()]
UpperCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
def check_hidden_states_output(lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Tuple ):
UpperCAmelCase : Optional[Any] = model_class(lowercase_ )
UpperCAmelCase : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[int] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase : Any = layer_type
UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : str = TFResNetModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCamelCase( ):
UpperCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self : Any ) -> Dict:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
UpperCAmelCase : Dict = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase : Optional[Any] = self.default_image_processor
UpperCAmelCase : str = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=lowercase_ , return_tensors='tf' )
# forward pass
UpperCAmelCase : Any = model(**lowercase_ )
# verify the logits
UpperCAmelCase : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
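        # The first three logits the test expects for the COCO cats fixture
        # under this pretrained checkpoint.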
UpperCAmelCase : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowercase_ , atol=1E-4 ) )
| 695 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    '''
    Least-recently-used cache: a deque of keys ordered from most to least
    recently used, plus a set for O(1) membership checks.
    '''

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        # Referring to a key moves it to the front; a new key may evict the
        # least recently used one when the cache is full.
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        # Print keys from most to least recently used.
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    # Fetch the ids of the current top stories, then resolve each story.
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    # Render the top stories as a Markdown bullet list of [title](url) links.
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 695 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"gpt-neox-20b": 2048,
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowercase_ : Any=None , lowercase_ : Dict=None , lowercase_ : List[str]=None , lowercase_ : List[Any]="<|endoftext|>" , lowercase_ : List[str]="<|endoftext|>" , lowercase_ : Any="<|endoftext|>" , lowercase_ : List[str]=False , **lowercase_ : Union[str, Any] , ) -> str:
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
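        # If the serialized byte-level pre-tokenizer disagrees with the requested
        # add_prefix_space, rebuild it with the new setting.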
UpperCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase_ ) != add_prefix_space:
UpperCAmelCase : Tuple = getattr(lowercase_ , pre_tok_state.pop('type' ) )
UpperCAmelCase : Optional[Any] = add_prefix_space
UpperCAmelCase : Tuple = pre_tok_class(**lowercase_ )
UpperCAmelCase : Any = add_prefix_space
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : Optional[int] = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : "Conversation" ) -> List[int]:
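        # Flatten the conversation: encode each turn, append EOS after it, and
        # keep only the most recent model_max_length tokens.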
UpperCAmelCase : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_ ) + [self.eos_token_id] )
if len(lowercase_ ) > self.model_max_length:
UpperCAmelCase : int = input_ids[-self.model_max_length :]
return input_ids
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = """new-model"""
if is_tf_available():
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = NewModelConfig
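# The dummy config/model pair above (model_type "new-model") backs the
# auto-class registration tests in this module.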
@require_tf
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase_ ( self : Dict ) -> Dict:
UpperCAmelCase : str = 'bert-base-cased'
UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
UpperCAmelCase : Optional[int] = TFAutoModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
UpperCAmelCase : Dict = 'bert-base-cased'
UpperCAmelCase : int = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
UpperCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@slow
def UpperCAmelCase_ ( self : Dict ) -> Any:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
UpperCAmelCase : str = TFAutoModelForCausalLM.from_pretrained(lowercase_ )
UpperCAmelCase , UpperCAmelCase : int = TFAutoModelForCausalLM.from_pretrained(lowercase_ , output_loading_info=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
UpperCAmelCase : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@slow
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
UpperCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = TFAutoModelForMaskedLM.from_pretrained(lowercase_ , output_loading_info=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@slow
def UpperCAmelCase_ ( self : int ) -> List[str]:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Dict = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
UpperCAmelCase : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase_ , output_loading_info=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase : Tuple = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
UpperCAmelCase : Tuple = TFAutoModelForSequenceClassification.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase : Dict = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
UpperCAmelCase : Optional[int] = TFAutoModelForQuestionAnswering.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@slow
@require_tensorflow_probability
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
UpperCAmelCase : str = AutoConfig.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
UpperCAmelCase : Tuple = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowercase_ , output_loading_info=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase_ ) , 14_410 )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
UpperCAmelCase : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase_ ) , 14_410 )
def UpperCAmelCase_ ( self : int ) -> List[Any]:
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
UpperCAmelCase : List[str] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(lowercase_ , lowercase_ )
UpperCAmelCase : List[Any] = copy.deepcopy(model.config )
UpperCAmelCase : Any = ['FunnelBaseModel']
UpperCAmelCase : Optional[Any] = TFAutoModel.from_config(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase_ )
UpperCAmelCase : Optional[Any] = TFAutoModel.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> int:
try:
AutoConfig.register('new-model' , lowercase_ )
UpperCAmelCase : Optional[int] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowercase_ ):
auto_class.register(lowercase_ , lowercase_ )
auto_class.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
auto_class.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Optional[Any] = BertModelTester(self ).get_config()
UpperCAmelCase : Union[str, Any] = NewModelConfig(**tiny_config.to_dict() )
UpperCAmelCase : Optional[int] = auto_class.from_config(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase_ )
UpperCAmelCase : int = auto_class.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[Any] = TFAutoModel.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : List[str] = TFAutoModel.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
UpperCAmelCase : Optional[Any] = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
with self.assertRaisesRegex(lowercase_ , 'Use `from_pt=True` to load this model' ):
UpperCAmelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def UpperCAmelCase_ ( self : Any ) -> str:
# Make sure we have cached the model.
UpperCAmelCase : str = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
UpperCAmelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
UpperCAmelCase : Optional[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
UpperCAmelCase : Optional[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
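# Illustrative sketch (not part of the test class above): the minimal
# config-registration round trip that the registration test exercises.
# `DemoConfig` and "demo-model" are hypothetical names used only here;
# assumes `transformers` is installed.
def _demo_register_config():
    from transformers import PretrainedConfig

    class DemoConfig(PretrainedConfig):
        model_type = 'demo-model'

    AutoConfig.register('demo-model', DemoConfig)
    assert isinstance(AutoConfig.for_model('demo-model'), DemoConfig)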
| 695 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = """openai/whisper-base"""
UpperCAmelCase_ : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
UpperCAmelCase_ : Dict = """transcriber"""
UpperCAmelCase_ : int = WhisperProcessor
UpperCAmelCase_ : Optional[int] = WhisperForConditionalGeneration
UpperCAmelCase_ : Dict = ["""audio"""]
UpperCAmelCase_ : Optional[int] = ["""text"""]
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> Optional[int]:
return self.pre_processor(lowercase_ , return_tensors='pt' ).input_features
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int ) -> List[str]:
return self.model.generate(inputs=lowercase_ )
def UpperCAmelCase_ ( self : str , lowercase_ : List[Any] ) -> List[str]:
return self.pre_processor.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )[0]
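# A hedged end-to-end sketch of the encode -> forward -> decode pipeline the
# tool above wraps, assuming `transformers`, `torch` and `numpy` are
# installed; the one-second silent waveform stands in for real 16 kHz audio.
def _demo_transcribe() -> str:
    import numpy as np

    processor = WhisperProcessor.from_pretrained('openai/whisper-base')
    model = WhisperForConditionalGeneration.from_pretrained('openai/whisper-base')
    audio = np.zeros(16_000, dtype=np.float32)
    features = processor(audio, sampling_rate=16_000, return_tensors='pt').input_features
    generated_ids = model.generate(inputs=features)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]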
| 695 | 1 |
'''simple docstring'''
import numpy
# List of input, output pairs
lowercase__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
lowercase__ = (((515, 22, 13), 555), ((61, 35, 49), 150))
lowercase__ = [2, 4, 1, 5]
lowercase__ = len(train_data)
lowercase__ = 0.009
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_="train" ):
return calculate_hypothesis_value(UpperCAmelCase_ , UpperCAmelCase_ ) - output(
UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : int = 0
for i in range(len(UpperCAmelCase_ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_=m ):
UpperCAmelCase : Dict = 0
for i in range(UpperCAmelCase_ ):
if index == -1:
summation_value += _error(UpperCAmelCase_ )
else:
summation_value += _error(UpperCAmelCase_ ) * train_data[i][0][index]
return summation_value
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : str = summation_of_cost_derivative(UpperCAmelCase_ , UpperCAmelCase_ ) / m
return cost_derivative_value
def UpperCamelCase( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCAmelCase : List[str] = 0.00_0002
UpperCAmelCase : str = 0
UpperCAmelCase : str = 0
while True:
j += 1
UpperCAmelCase : Any = [0, 0, 0, 0]
for i in range(0 , len(UpperCAmelCase_ ) ):
UpperCAmelCase : List[Any] = get_cost_derivative(i - 1 )
UpperCAmelCase : Union[str, Any] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
UpperCAmelCase_ , UpperCAmelCase_ , atol=UpperCAmelCase_ , rtol=UpperCAmelCase_ , ):
break
UpperCAmelCase : List[Any] = temp_parameter_vector
print(('Number of iterations:', j) )
def UpperCamelCase( ):
for i in range(len(UpperCAmelCase_ ) ):
print(('Actual output value:', output(UpperCAmelCase_ , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(UpperCAmelCase_ , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
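# For reference, a vectorized sketch of the same batch update rule,
# theta <- theta - lr * x.T @ (x @ theta - y) / m, assuming `x` is a numpy
# design matrix whose leading column of ones plays the role of
# parameter_vector[0].
def gradient_step(theta, x, y, lr=0.009):
    errors = x @ theta - y  # hypothesis value minus observed output, per example
    return theta - lr * (x.T @ errors) / len(y)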
| 695 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """whisper"""
UpperCAmelCase_ : Tuple = ["""past_key_values"""]
UpperCAmelCase_ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str , lowercase_ : Any=51_865 , lowercase_ : List[Any]=80 , lowercase_ : int=6 , lowercase_ : Dict=4 , lowercase_ : List[Any]=6 , lowercase_ : Any=4 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : List[Any]=50_257 , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : str="gelu" , lowercase_ : List[str]=256 , lowercase_ : str=0.0 , lowercase_ : Any=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]=1_500 , lowercase_ : List[Any]=448 , lowercase_ : int=50_256 , lowercase_ : Union[str, Any]=50_256 , lowercase_ : List[Any]=50_256 , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=[220, 50_256] , lowercase_ : Tuple=False , lowercase_ : str=256 , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=0.05 , lowercase_ : Any=10 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=10 , lowercase_ : int=0 , lowercase_ : Optional[int]=7 , **lowercase_ : Union[str, Any] , ) -> List[str]:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Any = num_mel_bins
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : str = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_layers
UpperCAmelCase : Any = decoder_attention_heads
UpperCAmelCase : Tuple = decoder_ffn_dim
UpperCAmelCase : List[str] = encoder_ffn_dim
UpperCAmelCase : int = dropout
UpperCAmelCase : int = attention_dropout
UpperCAmelCase : List[Any] = activation_dropout
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Union[str, Any] = init_std
UpperCAmelCase : Dict = encoder_layerdrop
UpperCAmelCase : str = decoder_layerdrop
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : Tuple = max_source_positions
UpperCAmelCase : List[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase : Optional[int] = classifier_proj_size
UpperCAmelCase : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : Optional[Any] = apply_spec_augment
UpperCAmelCase : Optional[Any] = mask_time_prob
UpperCAmelCase : Optional[Any] = mask_time_length
UpperCAmelCase : str = mask_time_min_masks
UpperCAmelCase : List[str] = mask_feature_prob
UpperCAmelCase : Tuple = mask_feature_length
UpperCAmelCase : Optional[int] = mask_feature_min_masks
UpperCAmelCase : str = median_filter_width
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , suppress_tokens=lowercase_ , begin_suppress_tokens=lowercase_ , **lowercase_ , )
class A_ ( _snake_case ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase : Optional[int] = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
UpperCAmelCase : int = {0: 'batch'}
else:
UpperCAmelCase : List[str] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='inputs' )
return common_inputs
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 22_050 , lowercase_ : float = 5.0 , lowercase_ : int = 220 , ) -> Mapping[str, Any]:
UpperCAmelCase : Tuple = OrderedDict()
UpperCAmelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase_ , framework=lowercase_ , sampling_rate=lowercase_ , time_duration=lowercase_ , frequency=lowercase_ , )
UpperCAmelCase : Optional[Any] = encoder_inputs['input_features'].shape[2]
UpperCAmelCase : Tuple = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase : Optional[int] = super().generate_dummy_inputs(
preprocessor.tokenizer , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Dict = encoder_inputs.pop('input_features' )
UpperCAmelCase : List[str] = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase : Union[str, Any] = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def UpperCAmelCase_ ( self : Dict ) -> float:
return 1E-3
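# Hedged usage sketch mirroring the upstream `WhisperConfig` API that the
# first class above corresponds to; assumes `transformers` is installed.
def _demo_whisper_config() -> int:
    from transformers import WhisperConfig

    demo_config = WhisperConfig(vocab_size=51_865, d_model=512, encoder_layers=6)
    return demo_config.max_source_positions  # 1_500 by default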
| 695 | 1 |
'''simple docstring'''
import string
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Tuple = ''
for i in sequence:
UpperCAmelCase : Any = ord(UpperCAmelCase_ )
if 65 <= extract <= 90:
output += chr(1_55 - extract )
elif 97 <= extract <= 1_22:
output += chr(2_19 - extract )
else:
output += i
return output
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = string.ascii_letters
UpperCAmelCase : Any = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(UpperCAmelCase_ )] if c in letters else c for c in sequence )
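# An equivalent single-pass variant built on str.maketrans (reusing the
# `string` import above); an illustrative third implementation, not wired
# into the benchmark below.
_ATBASH_TABLE = str.maketrans(
    string.ascii_letters,
    string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1],
)

def atbash_maketrans(sequence: str) -> str:
    return sequence.translate(_ATBASH_TABLE)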
def UpperCamelCase( ):
from timeit import timeit
print('Running performance benchmarks...' )
UpperCAmelCase : Any = 'from string import printable ; from __main__ import atbash, atbash_slow'
print(F"""> atbash_slow(): {timeit("atbash_slow(printable)" , setup=UpperCAmelCase_ )} seconds""" )
print(F"""> atbash(): {timeit("atbash(printable)" , setup=UpperCAmelCase_ )} seconds""" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 695 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
UpperCAmelCase : Any = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
UpperCAmelCase : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
UpperCAmelCase : Dict = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase : Dict = torch.cuda.device_count()
UpperCAmelCase : List[Any] = num_gpus
UpperCAmelCase : List[Any] = False
if num_gpus > 1:
UpperCAmelCase : Tuple = 'MULTI_GPU'
else:
UpperCAmelCase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
UpperCAmelCase : Optional[int] = torch.xpu.device_count()
UpperCAmelCase : Optional[int] = num_xpus
UpperCAmelCase : Any = False
if num_xpus > 1:
UpperCAmelCase : Tuple = 'MULTI_XPU'
else:
UpperCAmelCase : str = 'NO'
elif is_npu_available():
UpperCAmelCase : Optional[int] = torch.npu.device_count()
UpperCAmelCase : str = num_npus
UpperCAmelCase : int = False
if num_npus > 1:
UpperCAmelCase : int = 'MULTI_NPU'
else:
UpperCAmelCase : List[str] = 'NO'
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = True
UpperCAmelCase : str = 1
UpperCAmelCase : str = 'NO'
UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = parser.add_parser('default' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ )
parser.add_argument(
'--config_file' , default=UpperCAmelCase_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=UpperCAmelCase_ , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=UpperCAmelCase_ )
return parser
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 695 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 695 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : List[Any] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> None:
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 695 | 1 |
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
lowercase__ = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
lowercase__ = logging.WARNING
def UpperCamelCase( ):
UpperCAmelCase : Tuple = os.getenv('DATASETS_VERBOSITY' , UpperCAmelCase_ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def UpperCamelCase( ):
return __name__.split('.' )[0]
def UpperCamelCase( ):
return logging.getLogger(_get_library_name() )
def UpperCamelCase( ):
# Apply our default configuration to the library root logger.
UpperCAmelCase : List[Any] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def UpperCamelCase( ):
UpperCAmelCase : Tuple = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def UpperCamelCase( UpperCAmelCase_ = None ):
if name is None:
UpperCAmelCase : List[Any] = _get_library_name()
return logging.getLogger(UpperCAmelCase_ )
def UpperCamelCase( ):
return _get_library_root_logger().getEffectiveLevel()
def UpperCamelCase( UpperCAmelCase_ ):
_get_library_root_logger().setLevel(UpperCAmelCase_ )
def UpperCamelCase( ):
return set_verbosity(UpperCAmelCase_ )
def UpperCamelCase( ):
return set_verbosity(UpperCAmelCase_ )
def UpperCamelCase( ):
return set_verbosity(UpperCAmelCase_ )
def UpperCamelCase( ):
return set_verbosity(UpperCAmelCase_ )
def UpperCamelCase( ):
UpperCAmelCase : int = False
def UpperCamelCase( ):
UpperCAmelCase : Union[str, Any] = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class A_ :
'''simple docstring'''
def __init__( self : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Any ) -> Optional[int]: # pylint: disable=unused-argument
UpperCAmelCase : List[Any] = args[0] if args else None
def __iter__( self : Tuple ) -> str:
return iter(self._iterator )
def __getattr__( self : List[str] , lowercase_ : Union[str, Any] ) -> str:
def empty_fn(*lowercase_ : Union[str, Any] , **lowercase_ : Tuple ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : str ) -> Union[str, Any]:
return self
def __exit__( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Tuple ) -> str:
return
lowercase__ = True
class A_ :
'''simple docstring'''
def __call__( self : Optional[int] , *lowercase_ : Tuple , lowercase_ : List[str]=False , **lowercase_ : Dict ) -> Union[str, Any]:
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*lowercase_ , **lowercase_ )
else:
return EmptyTqdm(*lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , *lowercase_ : int , **lowercase_ : Any ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
lowercase__ = _tqdm_cls()
def UpperCamelCase( ):
global _tqdm_active
return bool(_tqdm_active )
def UpperCamelCase( ):
global _tqdm_active
UpperCAmelCase : Dict = True
def UpperCamelCase( ):
global _tqdm_active
UpperCAmelCase : Any = False
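# Minimal usage sketch of this module's public surface as shipped upstream
# in `datasets.utils.logging`; assumes the `datasets` library is installed.
def _demo_configure_logging() -> None:
    from datasets.utils import logging as ds_logging

    ds_logging.set_verbosity_warning()
    ds_logging.get_logger(__name__).warning('datasets logging configured')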
| 695 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowercase__ = logging.getLogger(__name__)
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : str
UpperCAmelCase_ : str
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[int]
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[Union[int, float]] = None
UpperCAmelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]:
UpperCAmelCase : Dict = hans_processors[task]()
UpperCAmelCase : List[Any] = os.path.join(
lowercase_ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase : int = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase : Tuple = torch.load(lowercase_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase : int = (
processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
)
logger.info('Training examples: %s' , len(lowercase_ ) )
UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
logger.info('Saving features into cached file %s' , lowercase_ )
torch.save(self.features , lowercase_ )
def __len__( self : Union[str, Any] ) -> str:
return len(self.features )
def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : Tuple , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : int=False , lowercase_ : bool = False , ) -> Union[str, Any]:
UpperCAmelCase : int = hans_processors[task]()
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Any = label_list
UpperCAmelCase : str = processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
UpperCAmelCase : int = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
lowercase_ , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return self.dataset
def __len__( self : Tuple ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
return self.label_list
class A_ ( _snake_case ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int , lowercase_ : Optional[int] ) -> Any:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_train_set.txt' ) ) , 'train' )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> List[str]:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
return ["contradiction", "entailment", "neutral"]
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str ) -> Dict:
UpperCAmelCase : Union[str, Any] = []
for i, line in enumerate(lowercase_ ):
if i == 0:
continue
UpperCAmelCase : Tuple = '%s-%s' % (set_type, line[0])
UpperCAmelCase : Tuple = line[5]
UpperCAmelCase : Dict = line[6]
UpperCAmelCase : Optional[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7]
UpperCAmelCase : Optional[Any] = line[0]
examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
return examples
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
UpperCAmelCase : List[str] = {label: i for i, label in enumerate(UpperCAmelCase_ )}
UpperCAmelCase : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase_ ) , desc='convert examples to features' ):
        if ex_index % 10_000 == 0:
logger.info('Writing example %d' % (ex_index) )
UpperCAmelCase : int = tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , truncation=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , )
UpperCAmelCase : List[str] = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase : Any = int(example.pairID )
features.append(InputFeatures(**UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
lowercase__ = {
"hans": 3,
}
lowercase__ = {
"hans": HansProcessor,
}
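# A small runnable sketch of the per-pair tokenization contract used by
# hans_convert_examples_to_features above; assumes `transformers` is
# installed and the tokenizer can be downloaded. The premise/hypothesis
# pair is an illustrative HANS-style example.
def _demo_hans_tokenization() -> int:
    from transformers import AutoTokenizer

    demo_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    demo_encoding = demo_tokenizer(
        'The doctor saw the lawyer.',
        'The lawyer saw the doctor.',
        max_length=128,
        padding='max_length',
        truncation=True,
    )
    return len(demo_encoding['input_ids'])  # 128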
| 695 | 1 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowercase__ = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def UpperCamelCase( UpperCAmelCase_ ):
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
UpperCAmelCase : List[Any] = list(s_dict.keys() )
for key in keys:
UpperCAmelCase : List[str] = R'.*/layers_(\d+)'
UpperCAmelCase : List[Any] = key
if re.match(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = re.sub(R'layers_(\d+)' , R'block/\1/layer' , UpperCAmelCase_ )
UpperCAmelCase : List[Any] = R'(encoder|decoder)\/'
if re.match(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Dict = re.match(UpperCAmelCase_ , UpperCAmelCase_ ).groups()
if groups[0] == "encoder":
UpperCAmelCase : Optional[Any] = re.sub(R'/mlp/' , R'/1/mlp/' , UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , UpperCAmelCase_ )
elif groups[0] == "decoder":
UpperCAmelCase : Any = re.sub(R'/mlp/' , R'/2/mlp/' , UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , UpperCAmelCase_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
UpperCAmelCase : Optional[int] = new_key.replace(UpperCAmelCase_ , UpperCAmelCase_ )
print(F"""{key} -> {new_key}""" )
UpperCAmelCase : Optional[int] = s_dict.pop(UpperCAmelCase_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase : Union[str, Any] = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase : List[str] = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
UpperCAmelCase : str = s_dict[key].shape[0]
UpperCAmelCase : List[str] = s_dict[key]
for idx in range(UpperCAmelCase_ ):
UpperCAmelCase : Optional[Any] = expert_weihts[idx]
print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(UpperCAmelCase_ )
return s_dict
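# Self-contained check of the layer-renaming step in the function above: a
# T5X "layers_<i>" key becomes a Hugging Face "block/<i>/layer" key.
def _demo_rename_layer_key(key: str = 'encoder/layers_3/attention/query/kernel') -> str:
    # Returns 'encoder/block/3/layer/attention/query/kernel' for the default key.
    return re.sub(R'layers_(\d+)' , R'block/\1/layer' , key)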
lowercase__ = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
    # Convert a Google-style gin config to the Hugging Face format
import regex as re
with open(UpperCAmelCase_ , 'r' ) as f:
UpperCAmelCase : Any = f.read()
UpperCAmelCase : Union[str, Any] = re.findall(R'(.*) = ([0-9.]*)' , UpperCAmelCase_ )
UpperCAmelCase : Dict = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
UpperCAmelCase : Optional[Any] = float(UpperCAmelCase_ ) if '.' in value else int(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = re.findall(R'(.*activations) = \(\'(.*)\',\)' , UpperCAmelCase_ )[0]
UpperCAmelCase : Tuple = str(activation[1] )
UpperCAmelCase : Dict = num_experts
UpperCAmelCase : Any = SwitchTransformersConfig(**UpperCAmelCase_ )
return config
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_="./" , UpperCAmelCase_=8 ):
# Initialise PyTorch model
print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
UpperCAmelCase : Union[str, Any] = checkpoints.load_tax_checkpoint(UpperCAmelCase_ )
if gin_file is not None:
UpperCAmelCase : Optional[int] = convert_gin_to_config(UpperCAmelCase_ , UpperCAmelCase_ )
else:
UpperCAmelCase : int = SwitchTransformersConfig.from_pretrained(UpperCAmelCase_ )
UpperCAmelCase : Dict = SwitchTransformersForConditionalGeneration(UpperCAmelCase_ )
UpperCAmelCase : Union[str, Any] = flax_params['target']
UpperCAmelCase : Tuple = flatten_dict(UpperCAmelCase_ , sep='/' )
UpperCAmelCase : List[str] = rename_keys(UpperCAmelCase_ )
UpperCAmelCase : Dict = unflatten_dict(UpperCAmelCase_ , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase_ , UpperCAmelCase_ )
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
lowercase__ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 695 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10_00 ):
UpperCAmelCase , UpperCAmelCase : Any = 1, 1
UpperCAmelCase : Any = []
for i in range(1 , n + 1 ):
UpperCAmelCase : Tuple = prev_numerator + 2 * prev_denominator
UpperCAmelCase : Any = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
UpperCAmelCase : Dict = numerator
UpperCAmelCase : Dict = denominator
return len(UpperCAmelCase_ )
if __name__ == "__main__":
print(f'''{solution() = }''')
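# Cross-check with exact rational arithmetic: the expansions satisfy
# e_{k+1} = 1 + 1/(1 + e_k) with e_1 = 3/2, and counting those whose
# numerator has more digits than the denominator gives 153 for n = 1000
# (the Project Euler 57 answer).
from fractions import Fraction

def solution_fraction(n: int = 1_000) -> int:
    x = Fraction(3, 2)
    count = 0
    for _ in range(n):
        if len(str(x.numerator)) > len(str(x.denominator)):
            count += 1
        x = 1 + 1 / (1 + x)
    return count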
| 695 | 1 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
lowercase__ = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 695 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10_00 ):
UpperCAmelCase : List[Any] = 2**power
UpperCAmelCase : List[Any] = 0
while n:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
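# One-line cross-check via string conversion, an illustrative alternative to
# the arithmetic loop above (the digit sum of 2**1000 is 1366).
def solution_str(power: int = 1_000) -> int:
    return sum(int(digit) for digit in str(2**power))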
| 695 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> List[Any]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ["""sentencepiece"""]
def __init__( self : int , *lowercase_ : Optional[Any] , **lowercase_ : Tuple ) -> int:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Any = ["""sentencepiece"""]
def __init__( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : List[str] ) -> Union[str, Any]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = ["""sentencepiece"""]
def __init__( self : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ["""sentencepiece"""]
def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : Tuple ) -> Optional[int]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = ["""sentencepiece"""]
def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *lowercase_ : Optional[int] , **lowercase_ : int ) -> Dict:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ["""sentencepiece"""]
def __init__( self : str , *lowercase_ : Any , **lowercase_ : List[str] ) -> Any:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ["""sentencepiece"""]
def __init__( self : str , *lowercase_ : Dict , **lowercase_ : str ) -> int:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[Any] ) -> Optional[Any]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = ["""sentencepiece"""]
def __init__( self : Dict , *lowercase_ : List[Any] , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : int , *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> Optional[Any]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = ["""sentencepiece"""]
def __init__( self : Any , *lowercase_ : Any , **lowercase_ : Any ) -> int:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = ["""sentencepiece"""]
def __init__( self : Tuple , *lowercase_ : Dict , **lowercase_ : Union[str, Any] ) -> List[str]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Any ) -> Dict:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *lowercase_ : str , **lowercase_ : Tuple ) -> List[str]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = ["""sentencepiece"""]
def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : int ) -> str:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ["""sentencepiece"""]
def __init__( self : int , *lowercase_ : List[Any] , **lowercase_ : Union[str, Any] ) -> Dict:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = ["""sentencepiece"""]
def __init__( self : Tuple , *lowercase_ : List[Any] , **lowercase_ : int ) -> str:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ["""sentencepiece"""]
def __init__( self : str , *lowercase_ : Any , **lowercase_ : int ) -> str:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *lowercase_ : Any , **lowercase_ : Dict ) -> str:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = ["""sentencepiece"""]
def __init__( self : int , *lowercase_ : Optional[Any] , **lowercase_ : str ) -> Union[str, Any]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ["""sentencepiece"""]
def __init__( self : int , *lowercase_ : Any , **lowercase_ : Dict ) -> Optional[int]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = ["""sentencepiece"""]
def __init__( self : List[Any] , *lowercase_ : int , **lowercase_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ["""sentencepiece"""]
def __init__( self : Any , *lowercase_ : int , **lowercase_ : List[str] ) -> Tuple:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ["""sentencepiece"""]
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : List[Any] ) -> Optional[int]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = ["""sentencepiece"""]
def __init__( self : Optional[int] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ) -> Dict:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : Dict ) -> List[Any]:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ["""sentencepiece"""]
def __init__( self : Dict , *lowercase_ : Optional[Any] , **lowercase_ : str ) -> Any:
requires_backends(self , ['sentencepiece'] )
class A_ ( metaclass=_snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> Optional[int]:
requires_backends(self , ['sentencepiece'] )
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = """blip_2_vision_model"""
def __init__( self : List[str] , lowercase_ : int=1_408 , lowercase_ : Tuple=6_144 , lowercase_ : Dict=39 , lowercase_ : Optional[int]=16 , lowercase_ : str=224 , lowercase_ : Any=14 , lowercase_ : int="gelu" , lowercase_ : int=0.0_0001 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=1E-10 , lowercase_ : List[str]=True , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
super().__init__(**lowercase_ )
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : str = patch_size
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = qkv_bias
@classmethod
def UpperCAmelCase_ ( cls : List[str] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip_2_qformer"""
def __init__( self : Tuple , lowercase_ : Union[str, Any]=30_522 , lowercase_ : int=768 , lowercase_ : Dict=12 , lowercase_ : Dict=12 , lowercase_ : int=3_072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=1E-12 , lowercase_ : str=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : Optional[int]=2 , lowercase_ : str=1_408 , **lowercase_ : Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = layer_norm_eps
UpperCAmelCase : Dict = position_embedding_type
UpperCAmelCase : Any = cross_attention_frequency
UpperCAmelCase : Any = encoder_hidden_size
@classmethod
def UpperCAmelCase_ ( cls : List[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip-2"""
UpperCAmelCase_ : Any = True
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : Dict=32 , **lowercase_ : Union[str, Any] ) -> Any:
super().__init__(**lowercase_ )
if vision_config is None:
UpperCAmelCase : Union[str, Any] = {}
            logger.info('vision_config is None. Initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
UpperCAmelCase : int = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
UpperCAmelCase : Dict = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
UpperCAmelCase : str = BlipaVisionConfig(**lowercase_ )
UpperCAmelCase : str = BlipaQFormerConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
UpperCAmelCase : int = CONFIG_MAPPING[text_model_type](**lowercase_ )
UpperCAmelCase : Optional[int] = self.text_config.tie_word_embeddings
UpperCAmelCase : Dict = self.text_config.is_encoder_decoder
UpperCAmelCase : Tuple = num_query_tokens
UpperCAmelCase : Tuple = self.vision_config.hidden_size
UpperCAmelCase : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase : Union[str, Any] = 1.0
UpperCAmelCase : Union[str, Any] = 0.02
@classmethod
def UpperCAmelCase_ ( cls : Dict , lowercase_ : BlipaVisionConfig , lowercase_ : BlipaQFormerConfig , lowercase_ : PretrainedConfig , **lowercase_ : List[Any] , ) -> Tuple:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
UpperCAmelCase : Optional[int] = self.qformer_config.to_dict()
UpperCAmelCase : List[str] = self.text_config.to_dict()
UpperCAmelCase : str = self.__class__.model_type
return output
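    # Pattern note: to_dict() deep-copies __dict__ and re-serializes each sub-config
    # with its own to_dict(), so a composite BLIP-2 style config survives a
    # PretrainedConfig save/load round trip. Sketch, assuming `cfg` is an instance
    # of the composite class above:
    #   blob = cfg.to_dict()
    #   restored = cfg.__class__.from_dict(blob)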
| 695 | 1 |
'''simple docstring'''
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

def bfs(graph, s, t, parent):
    # Return True if the sink t is reachable from s in the residual graph.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]

def mincut(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity on the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res

if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
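# For the sample network above (the classic Ford-Fulkerson/CLRS example, max flow 23),
# the printed minimum cut is the set of saturated edges: [(1, 3), (4, 3), (4, 5)].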
| 695 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
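# bleu_data maps each language pair to parallel sentence lists, e.g.
# {"en-ru": {"src": [...], "tgt": [...]}, ...} (layout inferred from the usage below).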
@require_torch
class ModelEvalTester(unittest.TestCase):
    '''simple docstring'''

    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 695 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = 'a\n\'ll !!to?\'d of, can\'t.'
        expected = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])
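        # WordPiece above is greedy longest-match-first over the vocab: "unwanted"
        # splits into "un" + "##want" + "##ed", while a word containing any
        # out-of-vocab piece ("unwantedX") collapses entirely to the unk_token.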
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-uncased')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class PixaStructTextConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'pix2struct_text_model'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'hidden_size',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self, vocab_size=50_244, hidden_size=768, d_kv=64, d_ff=2_048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1E-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs, )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type') == "pix2struct":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)
class PixaStructVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'pix2struct_vision_model'
    def __init__( self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2_048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1E-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1E-10, initializer_factor=1.0, seq_len=4_096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type') == "pix2struct":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)
class PixaStructConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'pix2struct'
    is_composition = True
    def __init__( self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs, ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.')
        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(cls, text_config: PixaStructTextConfig, vision_config: PixaStructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
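    # Composition sketch: build the two sub-configs, combine, and round-trip, e.g.
    #   config = PixaStructConfig.from_text_vision_configs(
    #       PixaStructTextConfig(), PixaStructVisionConfig())
    #   assert config.to_dict()['model_type'] == 'pix2struct'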
| 695 | 1 |
'''simple docstring'''
import math
def proth(number: int) -> int:
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Each "block" doubles the number of new Proth numbers generated.
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
    return proth_list[number - 1]
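# Proth numbers have the form k * 2**n + 1 with k odd and k < 2**n;
# the sequence starts 3, 5, 9, 13, 17, 25, 33, 41, 49, 57, ...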
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f'''ValueError: there is no {number}th Proth number''')
continue
print(f'''The {number}th Proth number: {value}''')
| 695 |
'''simple docstring'''
import base64

def base32_encode(string: str) -> bytes:
    # encode the input to bytes, then Base32-encode the bytes-like object
    return base64.b32encode(string.encode('utf-8'))

def base32_decode(encoded_bytes: bytes) -> str:
    # decode from Base32, then decode the bytes back to a string
    return base64.b32decode(encoded_bytes).decode('utf-8')
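# Round-trip sanity check (expected value per RFC 4648 Base32):
#   base32_encode("Hello World!") == b'JBSWY3DPEBLW64TMMQQQ===='
#   base32_decode(base32_encode("Hello World!")) == "Hello World!"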
if __name__ == "__main__":
lowercase__ = "Hello World!"
lowercase__ = baseaa_encode(test)
print(encoded)
lowercase__ = baseaa_decode(encoded)
print(decoded)
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: int) -> float:
    if days_between_payments <= 0:
        raise ValueError('days_between_payments must be > 0')
    if daily_interest_rate < 0:
        raise ValueError('daily_interest_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * daily_interest_rate * days_between_payments

def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: int, ) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError('number_of_compounding_periods must be > 0')
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('nominal_annual_interest_rate_percentage must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )

def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: int, ) -> float:
    if number_of_years <= 0:
        raise ValueError('number_of_years must be > 0')
    if nominal_annual_percentage_rate < 0:
        raise ValueError('nominal_annual_percentage_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
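# Worked examples (values follow directly from the formulas above):
#   simple_interest(18_000, 0.06, 3)   -> 18_000 * 0.06 * 3          = 3_240.0
#   compound_interest(10_000, 0.05, 3) -> 10_000 * ((1.05 ** 3) - 1) = 1_576.25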
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))

def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape')
    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer

def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
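# Minimal usage sketch (dtypes and shapes must satisfy the checks above):
#   dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
#   value_array = np.array([[0, 0, 1]])
#   similarity_search(dataset, value_array)  # -> [[[0, 0, 0], 1.0]]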
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    '''simple docstring'''

    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained('facebook/xglm-564M')

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
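# The tester follows the shared TF test-mixin contract: prepare_config_and_inputs()
# yields (config, input_ids, input_mask, head_mask), and the *_for_common variant
# repackages that as (config, inputs_dict) for TFModelTesterMixin.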
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    '''simple docstring'''
@slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        input_ids = tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
@slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tf.random.set_seed(0)
        tokenized = tokenizer('Today is a nice day and', return_tensors='tf')
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
@slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        tokenizer.padding_side = 'left'
# use different length sentences to test batching
        sentences = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'], max_new_tokens=12)
        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 695 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    '''simple docstring'''

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
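        # trust_remote_code semantics exercised above: unset -> prompt the user (times
        # out here because setUp zeroes the remote-code prompt timeout), False -> refuse
        # code from the Hub, True -> download and run the repo's custom extractor class.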
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang='en_XX', tgt_lang='ro_RO', keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 1_054)
    def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size , 1_054 )
def UpperCAmelCase_ ( self : Any ) -> Any:
UpperCAmelCase : List[Any] = MBartaaTokenizer(lowercase_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=lowercase_ )
UpperCAmelCase : Tuple = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase : Union[str, Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
UpperCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
# fmt: off
UpperCAmelCase : Dict = {'input_ids': [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase : List[Any] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase : int = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase : str = tempfile.mkdtemp()
UpperCAmelCase : Union[str, Any] = tokenizer_r.save_pretrained(lowercase_ )
UpperCAmelCase : Optional[Any] = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
UpperCAmelCase : Optional[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
UpperCAmelCase : Tuple = tokenizer_r.from_pretrained(lowercase_ )
UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase : Dict = tempfile.mkdtemp()
UpperCAmelCase : int = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
UpperCAmelCase : Union[str, Any] = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
UpperCAmelCase : Tuple = tokenizer_r.from_pretrained(lowercase_ )
UpperCAmelCase : str = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(lowercase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase : str = tokenizer_r.from_pretrained(lowercase_ )
UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaOneToManyIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    checkpoint_name = 'facebook/mbart-large-50-one-to-many-mmt'
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO')
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 250_038 )
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self):
        src_text = ['this is gunna be a long sentence ' * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250_053, 250_001] )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='pt' )
        batch["decoder_input_ids"] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
        batch["decoder_input_ids"] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='pt' )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='pt' )
        labels = targets['input_ids']
        batch["decoder_input_ids"] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
        inputs = self.tokenizer._build_translation_inputs(
            'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
'input_ids': [[250_004, 62, 3_034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250_001,
} , )
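        # i.e. for mBART-50 the source-language code is prepended to the encoder input,
        # and the target-language id is surfaced as forced_bos_token_id for generation.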
| 695 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video( url ):
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url ).json()[0]['urls'][0]['src']
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 695 | 1 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowercase__ = "bert-base-cased"
lowercase__ = "google/pegasus-xsum"
lowercase__ = [" Sam ate lunch today.", "Sams lunch ingredients."]
lowercase__ = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
lowercase__ = "patrickvonplaten/t5-tiny-random"
lowercase__ = "sshleifer/bart-tiny-random"
lowercase__ = "sshleifer/tiny-mbart"
lowercase__ = "sshleifer/tiny-marian-en-de"
def _dump_articles( path , articles ):
    content = '\n'.join(articles )
    Path(path ).open('w' ).writelines(content )
def make_test_data_dir( tmp_dir ):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , F"""{split}.source""" ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , F"""{split}.target""" ) , SUMMARIES )
    return tmp_dir
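# The resulting layout (for orientation): tmp_dir/{train,val,test}.source and
# tmp_dir/{train,val,test}.target, one article or summary per line.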
class A_ ( _snake_case ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def UpperCAmelCase_ ( self : Optional[Any] , tok_name : str ) -> None:
        tokenizer = AutoTokenizer.from_pretrained(tok_name )
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        max_len_source = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
        max_len_target = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang , tgt_lang = 'ro_RO', 'de_DE'  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer , data_dir=tmp_dir , type_path='train' , max_source_length=max_src_len , max_target_length=max_tgt_len , src_lang=src_lang , tgt_lang=tgt_lang , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert isinstance(batch , dict )
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def UpperCAmelCase_ ( self : Union[str, Any] , tok_name : str ) -> None:
        tokenizer = AutoTokenizer.from_pretrained(tok_name )
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        max_len_source = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
        max_len_target = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer , data_dir=tmp_dir , type_path='train' , max_source_length=20 , max_target_length=trunc_target , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
        tokenizer = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        orig_examples = tmp_dir.joinpath('train.source' ).open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(tokenizer , tmp_dir , 128 , save_dir )
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath('train.source' ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples ) < len(orig_examples )
        assert len(packed_examples ) == 1
        assert len(packed_examples[0] ) == sum(len(x ) for x in orig_examples )
        assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
if not FAIRSEQ_AVAILABLE:
return
        ds , max_tokens , tokenizer = self._get_dataset(max_len=64 )
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens , required_batch_size_multiple=required_batch_size_multiple )
        batch_sizes = [len(x ) for x in batch_sampler]
        assert len(set(batch_sizes ) ) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes ) == len(ds )  # no dropped or added examples
        data_loader = DataLoader(ds , batch_sampler=batch_sampler , collate_fn=ds.collate_fn , num_workers=2 )
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch['input_ids'].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch['input_ids'].shape )
            num_src_per_batch.append(num_src_tokens )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(batch )
        assert num_src_per_batch[0] == max(num_src_per_batch )
        if failures:
            raise AssertionError(f"""too many tokens in {len(failures )} batches""" )
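        # (The dynamic sampler groups examples of similar length so each batch stays
        # near the max_tokens budget while batch sizes vary — hence the set-size check.)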
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
        ds , max_tokens , tokenizer = self._get_dataset(max_len=512 )
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs , shuffle=False )
        naive_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 )
        sortish_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 , sampler=sortish_sampler )
        pad = tokenizer.pad_token_id
        def count_pad_tokens(data_loader , k='input_ids' ):
            return [batch[k].eq(pad ).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(sortish_dl , k='labels' ) ) < sum(count_pad_tokens(naive_dl , k='labels' ) )
        assert sum(count_pad_tokens(sortish_dl ) ) < sum(count_pad_tokens(naive_dl ) )
        assert len(sortish_dl ) == len(naive_dl )
    def _get_dataset( self : Optional[Any] , n_obs : int = 1_000 , max_len : int = 128 ) -> Any:
        if os.getenv('USE_REAL_DATA' , False ):
            data_dir = 'examples/seq2seq/wmt_en_ro'
            max_tokens = max_len * 2 * 64
            if not Path(data_dir ).joinpath('train.len' ).exists():
                save_len_file(MARIAN_TINY , data_dir )
        else:
            data_dir = 'examples/seq2seq/test_data/wmt_en_ro'
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY , data_dir )
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY )
        ds = SeqaSeqDataset(
            tokenizer , data_dir=data_dir , type_path='train' , max_source_length=max_len , max_target_length=max_len , n_obs=n_obs , )
        return ds, max_tokens, tokenizer
def UpperCAmelCase_ ( self : Dict ) -> Dict:
        ds , max_tokens , tokenizer = self._get_dataset()
        idsa = set(DistributedSortishSampler(ds , 256 , num_replicas=2 , rank=0 , add_extra_examples=False ) )
        idsb = set(DistributedSortishSampler(ds , 256 , num_replicas=2 , rank=1 , add_extra_examples=False ) )
        assert idsa.intersection(idsb ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def UpperCAmelCase_ ( self : List[str] , tok_name : str ) -> None:
        tokenizer = AutoTokenizer.from_pretrained(tok_name , use_fast=False )
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs ) == 1 if tok_name == BART_TINY else len(kwargs ) == 0
| 695 |
'''simple docstring'''
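# This searches "almost equilateral" triangles (sides a, a, a±1) whose area and
# perimeter are both integral: their sides obey a Pell-like recurrence, so each
# solution is generated directly from the previous one instead of scanning every
# perimeter up to the bound. (Interpretation added; the recurrence itself is
# taken verbatim from the code below.)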
def solution( max_perimeter = 10**9 ):
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
| 695 | 1 |
'''simple docstring'''
def generate_large_matrix( ):
    return [list(range(10_00 - i , -10_00 - i , -1 ) ) for i in range(10_00 )]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid( grid ):
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index( array ):
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
def count_negatives_binary_search( grid ):
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force( grid ):
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break( grid ):
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark( ):
    from timeit import timeit
    print('Running benchmarks' )
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F"""{func}(grid=grid)""" , setup=setup , number=5_00 )
        print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
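# Complexity note (added): the binary-search variant also shrinks the per-row
# search bound as it walks down the grid (a "staircase" traversal), so it does
# at most O(m log n) work, versus O(m*n) for the brute-force scans above.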
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
    def dummy_uncond_unet( self : Any ) -> List[Any]:
torch.manual_seed(0 )
        model = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
    def dummy_vq_model( self : Optional[int] ) -> int:
torch.manual_seed(0 )
        model = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
    def dummy_text_encoder( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
        config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(config )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=2 , output_type='numpy' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator , num_inference_steps=2 , output_type='numpy' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Any:
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=5 , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 695 | 1 |
'''simple docstring'''
import argparse
from collections import defaultdict
def overwrite_file( file , class_name , test_name , correct_line , done_test ):
    _id = F"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1
    with open(file , 'r' ) as f:
        lines = f.readlines()
    class_regex = F"""class {class_name}("""
    test_regex = F"""{4 * " "}def {test_name}("""
    line_begin_regex = F"""{8 * " "}{correct_line.split()[0]}"""
    another_line_begin_regex = F"""{16 * " "}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(F"""{spaces * " "}{correct_line}""" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file , 'w' ) as f:
        for line in new_lines:
            f.write(line )
def main( correct , fail=None ):
    if fail is not None:
        with open(fail , 'r' ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct , 'r' ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file , class_name , test_name , correct_line = line.split(';' )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
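# Each line of --correct_filename is expected to follow (inferred from the
# split(';') above; the concrete example is illustrative only):
#   tests/test_modeling_foo.py;FooModelTest;test_logits;expected_slice = torch.tensor([...])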
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
    def dummy_uncond_unet( self : int ) -> Union[str, Any]:
torch.manual_seed(0 )
        model = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 , generator=generator , output_type='numpy' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type='numpy' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 , generator=generator , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 695 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main( ):
    parser = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers' )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
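# Typical invocations (illustrative): `transformers-cli env` or
# `transformers-cli download <model>`. Each Command class registers a subparser
# and binds `args.func` to its factory, which is why the hasattr guard above
# prints the help text when no subcommand is given.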
| 695 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = """autoformer"""
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
    def __init__( self : Dict , prediction_length : Optional[int] = None , context_length : Optional[int] = None , distribution_output : str = "student_t" , loss : str = "nll" , input_size : int = 1 , lags_sequence : List[int] = [1, 2, 3, 4, 5, 6, 7] , scaling : bool = True , num_time_features : int = 0 , num_dynamic_real_features : int = 0 , num_static_categorical_features : int = 0 , num_static_real_features : int = 0 , cardinality : Optional[List[int]] = None , embedding_dimension : Optional[List[int]] = None , d_model : int = 64 , encoder_attention_heads : int = 2 , decoder_attention_heads : int = 2 , encoder_layers : int = 2 , decoder_layers : int = 2 , encoder_ffn_dim : int = 32 , decoder_ffn_dim : int = 32 , activation_function : str = "gelu" , dropout : float = 0.1 , encoder_layerdrop : float = 0.1 , decoder_layerdrop : float = 0.1 , attention_dropout : float = 0.1 , activation_dropout : float = 0.1 , num_parallel_samples : int = 100 , init_std : float = 0.02 , use_cache : bool = True , is_encoder_decoder=True , label_length : int = 10 , moving_average : int = 25 , autocorrelation_factor : int = 3 , **kwargs , ) -> None:
# time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features( self : List[str] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
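    # Usage sketch (added; qualitative): the encoder input width computed in
    # __init__ is feature_size = input_size * len(lags_sequence) + _number_of_features,
    # where the property above bundles the embedding dims, dynamic/time/static real
    # features, and the two log-scale features appended per input dimension.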
| 695 | 1 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : int = sin(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = cos(UpperCAmelCase_ )
UpperCAmelCase : Dict = _sin / (2 * q_factor)
UpperCAmelCase : Tuple = (1 - _cos) / 2
UpperCAmelCase : Dict = 1 - _cos
UpperCAmelCase : Union[str, Any] = 1 + alpha
UpperCAmelCase : List[str] = -2 * _cos
UpperCAmelCase : List[Any] = 1 - alpha
UpperCAmelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
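# Note (added): the coefficient recipes in this file match the widely used
# "Audio EQ Cookbook" (Robert Bristow-Johnson) biquad designs — low-pass above,
# then high-pass, band-pass, all-pass, peaking EQ, and low/high shelf below.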
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : Tuple = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[str] = cos(UpperCAmelCase_ )
UpperCAmelCase : Tuple = _sin / (2 * q_factor)
UpperCAmelCase : int = (1 + _cos) / 2
UpperCAmelCase : List[Any] = -1 - _cos
UpperCAmelCase : Optional[int] = 1 + alpha
UpperCAmelCase : Any = -2 * _cos
UpperCAmelCase : str = 1 - alpha
UpperCAmelCase : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : List[Any] = tau * frequency / samplerate
UpperCAmelCase : Union[str, Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = cos(UpperCAmelCase_ )
UpperCAmelCase : Any = _sin / (2 * q_factor)
UpperCAmelCase : Optional[Any] = _sin / 2
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : int = -ba
UpperCAmelCase : List[str] = 1 + alpha
UpperCAmelCase : int = -2 * _cos
UpperCAmelCase : Optional[int] = 1 - alpha
UpperCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : Tuple = sin(UpperCAmelCase_ )
UpperCAmelCase : Dict = cos(UpperCAmelCase_ )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : int = 1 - alpha
UpperCAmelCase : Dict = -2 * _cos
UpperCAmelCase : Any = 1 + alpha
UpperCAmelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Optional[int] = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : Dict = cos(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = _sin / (2 * q_factor)
UpperCAmelCase : Union[str, Any] = 10 ** (gain_db / 40)
UpperCAmelCase : int = 1 + alpha * big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : List[Any] = 1 - alpha * big_a
UpperCAmelCase : Tuple = 1 + alpha / big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : int = 1 - alpha / big_a
UpperCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Dict = tau * frequency / samplerate
UpperCAmelCase : List[str] = sin(UpperCAmelCase_ )
UpperCAmelCase : Any = cos(UpperCAmelCase_ )
UpperCAmelCase : str = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : List[str] = 2 * sqrt(UpperCAmelCase_ ) * alpha
UpperCAmelCase : List[Any] = big_a * (pmc + aaa)
UpperCAmelCase : Optional[int] = 2 * big_a * mpc
UpperCAmelCase : Optional[int] = big_a * (pmc - aaa)
UpperCAmelCase : str = ppmc + aaa
UpperCAmelCase : int = -2 * pmpc
UpperCAmelCase : int = ppmc - aaa
UpperCAmelCase : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Tuple = tau * frequency / samplerate
UpperCAmelCase : List[str] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = cos(UpperCAmelCase_ )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : List[Any] = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : Optional[Any] = 2 * sqrt(UpperCAmelCase_ ) * alpha
UpperCAmelCase : Dict = big_a * (ppmc + aaa)
UpperCAmelCase : List[str] = -2 * big_a * pmpc
UpperCAmelCase : int = big_a * (ppmc - aaa)
UpperCAmelCase : Dict = pmc + aaa
UpperCAmelCase : Optional[int] = 2 * mpc
UpperCAmelCase : int = pmc - aaa
UpperCAmelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 695 |
'''simple docstring'''
def UpperCamelCase( sentence , ngram_size ):
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
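if __name__ == "__main__":
    # Illustrative only (added): character-level bigrams of a short string —
    # the slices are character-based, so word n-grams require splitting first.
    print(UpperCamelCase('I am' , 2 ) )  # ['I ', ' a', 'am']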
| 695 | 1 |
'''simple docstring'''
import math
class SelfOrganizingMap :
'''simple docstring'''
    def get_winner( self : Optional[int] , weights : list[list[float]] , sample : list[int] ) -> int:
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if da > db else 1
    def update( self : List[Any] , weights : list[list[int | float]] , sample : list[int] , j : int , alpha : float ) -> list[list[int | float]]:
        for i in range(len(weights[j] ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
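    # The two methods above implement a tiny winner-take-all (Kohonen-style) step:
    # pick whichever of the two weight vectors is closest to the sample, then pull
    # only that vector toward the sample by the learning rate alpha.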
def main( ):
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(F"""Clusters that the test sample belongs to : {winner}""" )
    print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
| 695 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : int = sin(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = cos(UpperCAmelCase_ )
UpperCAmelCase : Dict = _sin / (2 * q_factor)
UpperCAmelCase : Tuple = (1 - _cos) / 2
UpperCAmelCase : Dict = 1 - _cos
UpperCAmelCase : Union[str, Any] = 1 + alpha
UpperCAmelCase : List[str] = -2 * _cos
UpperCAmelCase : List[Any] = 1 - alpha
UpperCAmelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : Tuple = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[str] = cos(UpperCAmelCase_ )
UpperCAmelCase : Tuple = _sin / (2 * q_factor)
UpperCAmelCase : int = (1 + _cos) / 2
UpperCAmelCase : List[Any] = -1 - _cos
UpperCAmelCase : Optional[int] = 1 + alpha
UpperCAmelCase : Any = -2 * _cos
UpperCAmelCase : str = 1 - alpha
UpperCAmelCase : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : List[Any] = tau * frequency / samplerate
UpperCAmelCase : Union[str, Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = cos(UpperCAmelCase_ )
UpperCAmelCase : Any = _sin / (2 * q_factor)
UpperCAmelCase : Optional[Any] = _sin / 2
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : int = -ba
UpperCAmelCase : List[str] = 1 + alpha
UpperCAmelCase : int = -2 * _cos
UpperCAmelCase : Optional[int] = 1 - alpha
UpperCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : Tuple = sin(UpperCAmelCase_ )
UpperCAmelCase : Dict = cos(UpperCAmelCase_ )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : int = 1 - alpha
UpperCAmelCase : Dict = -2 * _cos
UpperCAmelCase : Any = 1 + alpha
UpperCAmelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Optional[int] = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : Dict = cos(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = _sin / (2 * q_factor)
UpperCAmelCase : Union[str, Any] = 10 ** (gain_db / 40)
UpperCAmelCase : int = 1 + alpha * big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : List[Any] = 1 - alpha * big_a
UpperCAmelCase : Tuple = 1 + alpha / big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : int = 1 - alpha / big_a
UpperCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Dict = tau * frequency / samplerate
UpperCAmelCase : List[str] = sin(UpperCAmelCase_ )
UpperCAmelCase : Any = cos(UpperCAmelCase_ )
UpperCAmelCase : str = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : List[str] = 2 * sqrt(UpperCAmelCase_ ) * alpha
UpperCAmelCase : List[Any] = big_a * (pmc + aaa)
UpperCAmelCase : Optional[int] = 2 * big_a * mpc
UpperCAmelCase : Optional[int] = big_a * (pmc - aaa)
UpperCAmelCase : str = ppmc + aaa
UpperCAmelCase : int = -2 * pmpc
UpperCAmelCase : int = ppmc - aaa
UpperCAmelCase : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Tuple = tau * frequency / samplerate
UpperCAmelCase : List[str] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = cos(UpperCAmelCase_ )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : List[Any] = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : Optional[Any] = 2 * sqrt(UpperCAmelCase_ ) * alpha
UpperCAmelCase : Dict = big_a * (ppmc + aaa)
UpperCAmelCase : List[str] = -2 * big_a * pmpc
UpperCAmelCase : int = big_a * (ppmc - aaa)
UpperCAmelCase : Dict = pmc + aaa
UpperCAmelCase : Optional[int] = 2 * mpc
UpperCAmelCase : int = pmc - aaa
UpperCAmelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 695 | 1 |
'''simple docstring'''
import numpy as np
def UpperCamelCase( f , ya , xa , h , x_end ):
    n = int(np.ceil((x_end - xa) / h ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        ka = f(x , y[k] )
        kb = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
        kc = f(x + 0.5 * h , y[k] + 0.5 * h * kb )
        kd = f(x + h , y[k] + h * kc )
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
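if __name__ == "__main__":
    # Illustrative only (added): dy/dx = y from x=0 to x=1 with h=0.1 and y(0)=1
    # should land near e ≈ 2.71828 at the final step.
    print(UpperCamelCase(lambda x, y: y , 1.0 , 0.0 , 0.1 , 1.0 )[-1] )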
| 695 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
class LRUCache ( Generic[T] ):
    '''simple docstring'''
    dq_store : deque[T]  # Cache store of keys
    key_reference : set[T]  # References of the keys in cache
    _MAX_CAPACITY : int = 10  # Maximum capacity of cache
    def __init__( self : List[Any] , n : int ) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.' )
        else:
            self._MAX_CAPACITY = n
    def refer( self : List[str] , lowercase_ : T ) -> None:
        if lowercase_ not in self.key_reference:
            if len(self.dq_store ) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
else:
self.dq_store.remove(lowercase_ )
self.dq_store.appendleft(lowercase_ )
self.key_reference.add(lowercase_ )
    def display( self : Dict ) -> None:
for k in self.dq_store:
print(lowercase_ )
def __repr__( self : Union[str, Any] ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
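    # Design note (added): membership is answered by the set in O(1) while recency
    # order lives in the deque — appendleft marks most-recently-used and pop evicts
    # from the least-recently-used end.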
| 695 | 1 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class A_ ( _snake_case ):
'''simple docstring'''
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
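        # i.e. single sequence: [CLS] tokens [SEP]; pair: [CLS] A [SEP] B [SEP] —
        # DistilBERT reuses BERT's special-token layout, which is what this test pins down.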
| 695 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class A_ ( _snake_case ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self : List[str] , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self : Tuple , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self : Optional[Any] , conversation : "Conversation" ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 695 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        """generator""",
        """return_dict""",
        """decoder_num_inference_steps""",
        """super_res_num_inference_steps""",
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self : List[Any] ) -> Optional[int]:
return 32
@property
    def time_input_dim( self : Union[str, Any] ) -> List[Any]:
return 32
@property
    def block_out_channels_a( self : Any ) -> Optional[int]:
return self.time_input_dim
@property
    def time_embed_dim( self : Dict ) -> Tuple:
return self.time_input_dim * 4
@property
    def cross_attention_dim( self : int ) -> Union[str, Any]:
return 100
@property
    def dummy_tokenizer( self : str ) -> List[Any]:
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
    def dummy_text_encoder( self : Optional[int] ) -> Tuple:
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(config )
@property
    def dummy_image_encoder( self : Union[str, Any] ) -> Dict:
torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
        return CLIPVisionModelWithProjection(config )
@property
    def dummy_text_proj( self : Dict ) -> Optional[int]:
torch.manual_seed(0 )
        model_kwargs = {
            'clip_embeddings_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'cross_attention_dim': self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs )
        return model
@property
    def dummy_decoder( self : Any ) -> Union[str, Any]:
torch.manual_seed(0 )
        model_kwargs = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
        model = UNetaDConditionModel(**model_kwargs )
return model
@property
    def dummy_super_res_kwargs( self : Any ) -> Any:
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
    def dummy_super_res_first( self : str ) -> Tuple:
torch.manual_seed(0 )
        model = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
    def dummy_super_res_last( self : Optional[Any] ) -> List[str]:
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
        model = UNetaDModel(**self.dummy_super_res_kwargs )
return model
    def get_dummy_components( self : Any ) -> Any:
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last
        decoder_scheduler = UnCLIPScheduler(
            variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
        super_res_scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
        feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
        image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
    def get_dummy_inputs( self : Tuple , device , seed=0 , pil_image=True ):
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def UpperCAmelCase_ ( self : str ) -> Any:
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
        output = pipe(**pipeline_inputs )
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
        image_from_tuple = pipe(
            **tuple_pipeline_inputs , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Dict ) -> str:
UpperCAmelCase : Dict = 'cpu'
UpperCAmelCase : Any = self.get_dummy_components()
UpperCAmelCase : List[Any] = self.pipeline_class(**lowercase_ )
UpperCAmelCase : Tuple = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
UpperCAmelCase : Union[str, Any] = pipe(**lowercase_ )
UpperCAmelCase : List[Any] = output.images
UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
UpperCAmelCase : List[str] = pipe(
**lowercase_ , return_dict=lowercase_ , )[0]
UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : Dict = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : List[Any] ) -> int:
UpperCAmelCase : List[Any] = 'cpu'
UpperCAmelCase : int = self.get_dummy_components()
UpperCAmelCase : int = self.pipeline_class(**lowercase_ )
UpperCAmelCase : Tuple = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Any = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
UpperCAmelCase : List[Any] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
UpperCAmelCase : List[Any] = pipe(**lowercase_ )
UpperCAmelCase : Tuple = output.images
UpperCAmelCase : str = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
UpperCAmelCase : Optional[int] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
UpperCAmelCase : Dict = pipe(
**lowercase_ , return_dict=lowercase_ , )[0]
UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
UpperCAmelCase : int = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : Dict = torch.device('cpu' )
class DummyScheduler :
'''simple docstring'''
init_noise_sigma : List[Any] = 1
UpperCAmelCase : int = self.get_dummy_components()
UpperCAmelCase : List[Any] = self.pipeline_class(**lowercase_ )
UpperCAmelCase : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase : Tuple = pipe.decoder.dtype
UpperCAmelCase : Union[str, Any] = 1
UpperCAmelCase : Optional[Any] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
UpperCAmelCase : Any = pipe.prepare_latents(
lowercase_ , dtype=lowercase_ , device=lowercase_ , generator=lowercase_ , latents=lowercase_ , scheduler=DummyScheduler() )
UpperCAmelCase : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
UpperCAmelCase : List[Any] = pipe.prepare_latents(
lowercase_ , dtype=lowercase_ , device=lowercase_ , generator=lowercase_ , latents=lowercase_ , scheduler=DummyScheduler() )
UpperCAmelCase : List[str] = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
UpperCAmelCase : Any = pipe(
**lowercase_ , decoder_latents=lowercase_ , super_res_latents=lowercase_ ).images
UpperCAmelCase : int = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
# Don't pass image, instead pass embedding
UpperCAmelCase : Any = pipeline_inputs.pop('image' )
UpperCAmelCase : List[str] = pipe.image_encoder(lowercase_ ).image_embeds
UpperCAmelCase : str = pipe(
**lowercase_ , decoder_latents=lowercase_ , super_res_latents=lowercase_ , image_embeddings=lowercase_ , ).images
# make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : Any = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
UpperCAmelCase : Tuple = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , expected_max_diff=lowercase_ )
@skip_mps
def UpperCAmelCase_ ( self : str ) -> List[str]:
UpperCAmelCase : Dict = torch_device == 'cpu'
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , additional_params_copy_to_batched_inputs=lowercase_ , )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
UpperCAmelCase : Union[str, Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
UpperCAmelCase : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=lowercase_ , additional_params_copy_to_batched_inputs=lowercase_ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=lowercase_ )
@skip_mps
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def UpperCAmelCase_ ( self : Any ) -> Dict:
return super().test_save_load_local()
@skip_mps
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
UpperCAmelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
UpperCAmelCase : List[Any] = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
UpperCAmelCase : Dict = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCAmelCase : Dict = pipeline(
lowercase_ , generator=lowercase_ , output_type='np' , )
UpperCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ , 15 )
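# A small standalone sketch (plain Python, no diffusers needed) of the latent
# shapes prepared in the tests above: the decoder denoises full-resolution RGB
# noise, while the super-res UNet reserves half of its input channels for the
# conditioning low-res image, so only in_channels // 2 are noise latents.
batch_size, decoder_channels, sample_size = 1, 3, 64
super_res_in_channels = 6 # matches dummy_super_res_kwargs above
decoder_latents_shape = (batch_size, decoder_channels, sample_size, sample_size)
super_res_latents_shape = (batch_size, super_res_in_channels // 2, sample_size, sample_size)
print(decoder_latents_shape, super_res_latents_shape) # (1, 3, 64, 64) (1, 3, 64, 64)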
| 695 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = """openai/whisper-base"""
UpperCAmelCase_ : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
UpperCAmelCase_ : Dict = """transcriber"""
UpperCAmelCase_ : int = WhisperProcessor
UpperCAmelCase_ : Optional[int] = WhisperForConditionalGeneration
UpperCAmelCase_ : Dict = ["""audio"""]
UpperCAmelCase_ : Optional[int] = ["""text"""]
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> Optional[int]:
return self.pre_processor(lowercase_ , return_tensors='pt' ).input_features
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int ) -> List[str]:
return self.model.generate(inputs=lowercase_ )
def UpperCAmelCase_ ( self : str , lowercase_ : List[Any] ) -> List[str]:
return self.pre_processor.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )[0]
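# A self-contained sketch of the encode -> forward -> decode contract that
# PipelineTool subclasses like the one above follow (mock objects only, so it
# runs without downloading Whisper weights; the token ids below are made up):
class MockTranscriberTool:
    def encode(self, audio):
        return {'input_features': audio}
    def forward(self, inputs):
        return [[50_258, 464, 2_068]] # hypothetical token ids
    def decode(self, outputs):
        return 'transcribed text'
    def __call__(self, audio):
        return self.decode(self.forward(self.encode(audio)))
print(MockTranscriberTool()('fake 16 kHz waveform'))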
| 695 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = """gpt_bigcode"""
UpperCAmelCase_ : List[Any] = ["""past_key_values"""]
UpperCAmelCase_ : Union[str, Any] = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[Any] , lowercase_ : Dict=50_257 , lowercase_ : Optional[Any]=1_024 , lowercase_ : str=768 , lowercase_ : List[Any]=12 , lowercase_ : List[Any]=12 , lowercase_ : int=None , lowercase_ : List[Any]="gelu_pytorch_tanh" , lowercase_ : Optional[int]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Any=1E-5 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : List[Any]=True , lowercase_ : Tuple=True , lowercase_ : Optional[int]=50_256 , lowercase_ : Tuple=50_256 , lowercase_ : List[str]=True , lowercase_ : Any=True , lowercase_ : str=True , **lowercase_ : int , ) -> Optional[int]:
UpperCAmelCase : List[str] = vocab_size
UpperCAmelCase : Optional[int] = n_positions
UpperCAmelCase : Tuple = n_embd
UpperCAmelCase : Any = n_layer
UpperCAmelCase : str = n_head
UpperCAmelCase : Dict = n_inner
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Optional[Any] = resid_pdrop
UpperCAmelCase : Tuple = embd_pdrop
UpperCAmelCase : int = attn_pdrop
UpperCAmelCase : Tuple = layer_norm_epsilon
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = scale_attn_weights
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : Tuple = attention_softmax_in_fpaa
UpperCAmelCase : List[Any] = scale_attention_softmax_in_fpaa
UpperCAmelCase : Optional[int] = multi_query
UpperCAmelCase : List[str] = bos_token_id
UpperCAmelCase : Any = eos_token_id
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
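# A minimal offline sketch against the upstream class this obfuscated config
# mirrors (assumes the `transformers` package provides `GPTBigCodeConfig`):
# the attribute map shown above lets `hidden_size` resolve to the stored
# `n_embd` value.
from transformers import GPTBigCodeConfig
tiny_cfg = GPTBigCodeConfig(n_embd=128, n_layer=2, n_head=4, multi_query=True)
assert tiny_cfg.hidden_size == tiny_cfg.n_embd == 128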
| 695 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """whisper"""
UpperCAmelCase_ : Tuple = ["""past_key_values"""]
UpperCAmelCase_ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str , lowercase_ : Any=51_865 , lowercase_ : List[Any]=80 , lowercase_ : int=6 , lowercase_ : Dict=4 , lowercase_ : List[Any]=6 , lowercase_ : Any=4 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : List[Any]=50_257 , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : str="gelu" , lowercase_ : List[str]=256 , lowercase_ : str=0.0 , lowercase_ : Any=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]=1_500 , lowercase_ : List[Any]=448 , lowercase_ : int=50_256 , lowercase_ : Union[str, Any]=50_256 , lowercase_ : List[Any]=50_256 , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=[220, 50_256] , lowercase_ : Tuple=False , lowercase_ : str=256 , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=0.05 , lowercase_ : Any=10 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=10 , lowercase_ : int=0 , lowercase_ : Optional[int]=7 , **lowercase_ : Union[str, Any] , ) -> List[str]:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Any = num_mel_bins
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : str = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_layers
UpperCAmelCase : Any = decoder_attention_heads
UpperCAmelCase : Tuple = decoder_ffn_dim
UpperCAmelCase : List[str] = encoder_ffn_dim
UpperCAmelCase : int = dropout
UpperCAmelCase : int = attention_dropout
UpperCAmelCase : List[Any] = activation_dropout
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Union[str, Any] = init_std
UpperCAmelCase : Dict = encoder_layerdrop
UpperCAmelCase : str = decoder_layerdrop
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : Tuple = max_source_positions
UpperCAmelCase : List[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase : Optional[int] = classifier_proj_size
UpperCAmelCase : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : Optional[Any] = apply_spec_augment
UpperCAmelCase : Optional[Any] = mask_time_prob
UpperCAmelCase : Optional[Any] = mask_time_length
UpperCAmelCase : str = mask_time_min_masks
UpperCAmelCase : List[str] = mask_feature_prob
UpperCAmelCase : Tuple = mask_feature_length
UpperCAmelCase : Optional[int] = mask_feature_min_masks
UpperCAmelCase : str = median_filter_width
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , suppress_tokens=lowercase_ , begin_suppress_tokens=lowercase_ , **lowercase_ , )
class A_ ( _snake_case ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase : Optional[int] = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
UpperCAmelCase : int = {0: 'batch'}
else:
UpperCAmelCase : List[str] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='inputs' )
return common_inputs
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 22_050 , lowercase_ : float = 5.0 , lowercase_ : int = 220 , ) -> Mapping[str, Any]:
UpperCAmelCase : Tuple = OrderedDict()
UpperCAmelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase_ , framework=lowercase_ , sampling_rate=lowercase_ , time_duration=lowercase_ , frequency=lowercase_ , )
UpperCAmelCase : Optional[Any] = encoder_inputs['input_features'].shape[2]
UpperCAmelCase : Tuple = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase : Optional[int] = super().generate_dummy_inputs(
preprocessor.tokenizer , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Dict = encoder_inputs.pop('input_features' )
UpperCAmelCase : List[str] = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase : Union[str, Any] = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def UpperCAmelCase_ ( self : Dict ) -> float:
return 1E-3
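# A minimal offline sketch against the upstream class this snippet mirrors
# (assumes `transformers`' `WhisperConfig`); tiny sizes, only to show that the
# encoder/decoder knobs line up with the attributes set above.
from transformers import WhisperConfig
tiny_whisper = WhisperConfig(
    d_model=64, encoder_layers=2, decoder_layers=2, encoder_attention_heads=4, decoder_attention_heads=4
)
print(tiny_whisper.model_type, tiny_whisper.max_source_positions) # whisper 1500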
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
lowercase__ = list[tuple[int, int]]
lowercase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0s are free paths whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class A_ :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : Node | None ) -> Optional[int]:
UpperCAmelCase : Optional[int] = pos_x
UpperCAmelCase : Tuple = pos_y
UpperCAmelCase : List[str] = (pos_y, pos_x)
UpperCAmelCase : List[Any] = goal_x
UpperCAmelCase : List[str] = goal_y
UpperCAmelCase : List[Any] = parent
class A_ :
'''simple docstring'''
def __init__( self : Dict , lowercase_ : tuple[int, int] , lowercase_ : tuple[int, int] ) -> Tuple:
UpperCAmelCase : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , lowercase_ )
UpperCAmelCase : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowercase_ )
UpperCAmelCase : Union[str, Any] = [self.start]
UpperCAmelCase : str = False
def UpperCAmelCase_ ( self : Any ) -> Path | None:
while self.node_queue:
UpperCAmelCase : List[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
UpperCAmelCase : List[Any] = True
return self.retrace_path(lowercase_ )
UpperCAmelCase : Any = self.get_successors(lowercase_ )
for node in successors:
self.node_queue.append(lowercase_ )
if not self.reached:
return [self.start.pos]
return None
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Node ) -> list[Node]:
UpperCAmelCase : List[str] = []
for action in delta:
UpperCAmelCase : str = parent.pos_x + action[1]
UpperCAmelCase : str = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , lowercase_ ) )
return successors
def UpperCAmelCase_ ( self : str , lowercase_ : Node | None ) -> Path:
UpperCAmelCase : str = node
UpperCAmelCase : Dict = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase : List[Any] = current_node.parent
path.reverse()
return path
class A_ :
'''simple docstring'''
def __init__( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[Any] ) -> List[str]:
UpperCAmelCase : str = BreadthFirstSearch(lowercase_ , lowercase_ )
UpperCAmelCase : Optional[int] = BreadthFirstSearch(lowercase_ , lowercase_ )
UpperCAmelCase : List[str] = False
def UpperCAmelCase_ ( self : str ) -> Path | None:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
UpperCAmelCase : Any = self.fwd_bfs.node_queue.pop(0 )
UpperCAmelCase : Any = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
UpperCAmelCase : Tuple = True
return self.retrace_bidirectional_path(
lowercase_ , lowercase_ )
UpperCAmelCase : Tuple = current_bwd_node
UpperCAmelCase : Tuple = current_fwd_node
UpperCAmelCase : Dict = {
self.fwd_bfs: self.fwd_bfs.get_successors(lowercase_ ),
self.bwd_bfs: self.bwd_bfs.get_successors(lowercase_ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(lowercase_ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCAmelCase_ ( self : int , lowercase_ : Node , lowercase_ : Node ) -> Path:
UpperCAmelCase : Union[str, Any] = self.fwd_bfs.retrace_path(lowercase_ )
UpperCAmelCase : Any = self.bwd_bfs.retrace_path(lowercase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase : str = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowercase__ = (0, 0)
lowercase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowercase__ = time.time()
lowercase__ = BreadthFirstSearch(init, goal)
lowercase__ = bfs.search()
lowercase__ = time.time() - start_bfs_time
print("Unidirectional BFS computation time : ", bfs_time)
lowercase__ = time.time()
lowercase__ = BidirectionalBreadthFirstSearch(init, goal)
lowercase__ = bd_bfs.search()
lowercase__ = time.time() - start_bd_bfs_time
print("Bidirectional BFS computation time : ", bd_bfs_time)
| 695 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
UpperCAmelCase : Any = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
UpperCAmelCase : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
UpperCAmelCase : Dict = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase : Dict = torch.cuda.device_count()
UpperCAmelCase : List[Any] = num_gpus
UpperCAmelCase : List[Any] = False
if num_gpus > 1:
UpperCAmelCase : Tuple = 'MULTI_GPU'
else:
UpperCAmelCase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
UpperCAmelCase : Optional[int] = torch.xpu.device_count()
UpperCAmelCase : Optional[int] = num_xpus
UpperCAmelCase : Any = False
if num_xpus > 1:
UpperCAmelCase : Tuple = 'MULTI_XPU'
else:
UpperCAmelCase : str = 'NO'
elif is_npu_available():
UpperCAmelCase : Optional[int] = torch.npu.device_count()
UpperCAmelCase : str = num_npus
UpperCAmelCase : int = False
if num_npus > 1:
UpperCAmelCase : int = 'MULTI_NPU'
else:
UpperCAmelCase : List[str] = 'NO'
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = True
UpperCAmelCase : str = 1
UpperCAmelCase : str = 'NO'
UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = parser.add_parser('default' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ )
parser.add_argument(
'--config_file' , default=UpperCAmelCase_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=UpperCAmelCase_ , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=UpperCAmelCase_ )
return parser
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 695 | 1 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : List[str] , lowercase_ : List[Any] ) -> Union[str, Any]:
UpperCAmelCase : int = 3
UpperCAmelCase : Optional[int] = 250
UpperCAmelCase : Any = ids_tensor((batch_size, length) , lowercase_ )
UpperCAmelCase : Optional[int] = torch.ones((batch_size, length) , device=lowercase_ , dtype=torch.float ) / length
return input_ids, scores
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
UpperCAmelCase , UpperCAmelCase : Any = self._get_tensors(5 )
UpperCAmelCase : Tuple = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self._get_tensors(9 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
UpperCAmelCase , UpperCAmelCase : Any = self._get_tensors(10 )
self.assertTrue(criteria(lowercase_ , lowercase_ ) )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = MaxLengthCriteria(max_length=10 )
UpperCAmelCase , UpperCAmelCase : Optional[int] = self._get_tensors(5 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
UpperCAmelCase , UpperCAmelCase : Any = self._get_tensors(9 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self._get_tensors(10 )
self.assertTrue(criteria(lowercase_ , lowercase_ ) )
def UpperCAmelCase_ ( self : Dict ) -> str:
UpperCAmelCase : List[str] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
UpperCAmelCase , UpperCAmelCase : List[Any] = self._get_tensors(5 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
UpperCAmelCase , UpperCAmelCase : List[Any] = self._get_tensors(9 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
UpperCAmelCase , UpperCAmelCase : int = self._get_tensors(10 )
self.assertTrue(criteria(lowercase_ , lowercase_ ) )
UpperCAmelCase : List[str] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
UpperCAmelCase , UpperCAmelCase : Dict = self._get_tensors(5 )
UpperCAmelCase : Union[str, Any] = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
UpperCAmelCase : int = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowercase_ , lowercase_ ) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowercase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
UpperCAmelCase : Optional[Any] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowercase_ ) , 1 )
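# A small standalone sketch of how these criteria compose at generation time
# (pure torch, no model needed; guarded like the imports at the top of this
# file): a StoppingCriteriaList is truthy as soon as any member fires.
if is_torch_available():
    demo_criteria = StoppingCriteriaList(
        [MaxLengthCriteria(max_length=8), MaxTimeCriteria(max_time=60.0)]
    )
    demo_ids = torch.ones((1, 8), dtype=torch.long)
    demo_scores = torch.zeros((1, 50))
    print(bool(demo_criteria(demo_ids, demo_scores))) # True: length 8 hit max_length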
| 695 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : List[Any] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> None:
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 695 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """bridgetower_vision_model"""
def __init__( self : List[str] , lowercase_ : Any=768 , lowercase_ : Dict=12 , lowercase_ : List[str]=3 , lowercase_ : int=16 , lowercase_ : Optional[Any]=288 , lowercase_ : Tuple=1 , lowercase_ : List[str]=1E-05 , lowercase_ : Tuple=False , lowercase_ : str=True , lowercase_ : Optional[int]=False , **lowercase_ : Any , ) -> str:
super().__init__(**lowercase_ )
UpperCAmelCase : int = hidden_size
UpperCAmelCase : Tuple = num_hidden_layers
UpperCAmelCase : Dict = num_channels
UpperCAmelCase : Optional[int] = patch_size
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : Union[str, Any] = initializer_factor
UpperCAmelCase : Optional[Any] = layer_norm_eps
UpperCAmelCase : Tuple = stop_gradient
UpperCAmelCase : Tuple = share_layernorm
UpperCAmelCase : int = remove_last_layer
@classmethod
def UpperCAmelCase_ ( cls : Any , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
if config_dict.get('model_type' ) == "bridgetower":
UpperCAmelCase : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Any = """bridgetower_text_model"""
def __init__( self : Any , lowercase_ : Optional[int]=50_265 , lowercase_ : List[Any]=768 , lowercase_ : int=12 , lowercase_ : Any=12 , lowercase_ : Optional[Any]=1 , lowercase_ : Any=3_072 , lowercase_ : Optional[int]="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=514 , lowercase_ : str=1 , lowercase_ : List[str]=1E-05 , lowercase_ : str=1 , lowercase_ : List[Any]=0 , lowercase_ : Union[str, Any]=2 , lowercase_ : Any="absolute" , lowercase_ : Any=True , **lowercase_ : List[str] , ) -> Union[str, Any]:
super().__init__(**lowercase_ )
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : int = initializer_factor
UpperCAmelCase : Optional[Any] = intermediate_size
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : str = max_position_embeddings
UpperCAmelCase : Dict = type_vocab_size
UpperCAmelCase : Optional[Any] = layer_norm_eps
UpperCAmelCase : Optional[int] = position_embedding_type
UpperCAmelCase : Optional[Any] = use_cache
UpperCAmelCase : str = pad_token_id
UpperCAmelCase : Dict = bos_token_id
UpperCAmelCase : Dict = eos_token_id
@classmethod
def UpperCAmelCase_ ( cls : List[str] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
UpperCAmelCase , UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
if config_dict.get('model_type' ) == "bridgetower":
UpperCAmelCase : Optional[int] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """bridgetower"""
def __init__( self : List[str] , lowercase_ : Dict=True , lowercase_ : Optional[Any]="gelu" , lowercase_ : Optional[Any]=768 , lowercase_ : List[Any]=1 , lowercase_ : Union[str, Any]=1E-05 , lowercase_ : Optional[int]=False , lowercase_ : Any="add" , lowercase_ : str=12 , lowercase_ : Optional[Any]=6 , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=False , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=None , **lowercase_ : Any , ) -> Union[str, Any]:
# TODO: remove this once the Hub files are updated.
UpperCAmelCase : Any = kwargs.pop('text_config_dict' , lowercase_ )
UpperCAmelCase : Optional[Any] = kwargs.pop('vision_config_dict' , lowercase_ )
super().__init__(**lowercase_ )
UpperCAmelCase : List[Any] = share_cross_modal_transformer_layers
UpperCAmelCase : Dict = hidden_act
UpperCAmelCase : Dict = hidden_size
UpperCAmelCase : str = initializer_factor
UpperCAmelCase : List[Any] = layer_norm_eps
UpperCAmelCase : Optional[int] = share_link_tower_layers
UpperCAmelCase : Tuple = link_tower_type
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Any = num_hidden_layers
UpperCAmelCase : str = tie_word_embeddings
UpperCAmelCase : Optional[int] = init_layernorm_from_vision_encoder
if text_config is None:
UpperCAmelCase : Dict = {}
logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' )
if vision_config is None:
UpperCAmelCase : str = {}
logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' )
UpperCAmelCase : List[Any] = BridgeTowerTextConfig(**lowercase_ )
UpperCAmelCase : List[Any] = BridgeTowerVisionConfig(**lowercase_ )
@classmethod
def UpperCAmelCase_ ( cls : int , lowercase_ : BridgeTowerTextConfig , lowercase_ : BridgeTowerVisionConfig , **lowercase_ : Any ) -> Union[str, Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : str = self.text_config.to_dict()
UpperCAmelCase : List[str] = self.vision_config.to_dict()
UpperCAmelCase : int = self.__class__.model_type
return output
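# A minimal offline sketch against the upstream classes this file mirrors
# (assumes `transformers` ships `BridgeTowerConfig` and friends): composing
# sub-configs the way `from_text_vision_configs` above does.
from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig
tiny_bt = BridgeTowerConfig.from_text_vision_configs(
    text_config=BridgeTowerTextConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2),
    vision_config=BridgeTowerVisionConfig(hidden_size=64, num_hidden_layers=2),
)
print(tiny_bt.link_tower_type) # 'add' by default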
| 695 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowercase__ = logging.getLogger(__name__)
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : str
UpperCAmelCase_ : str
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[int]
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[Union[int, float]] = None
UpperCAmelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]:
UpperCAmelCase : Dict = hans_processors[task]()
UpperCAmelCase : List[Any] = os.path.join(
lowercase_ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase : int = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase : Tuple = torch.load(lowercase_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase : int = (
processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
)
logger.info('Training examples: %s' , len(lowercase_ ) )
UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
logger.info('Saving features into cached file %s' , lowercase_ )
torch.save(self.features , lowercase_ )
def __len__( self : Union[str, Any] ) -> str:
return len(self.features )
def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : Tuple , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : int=False , lowercase_ : bool = False , ) -> Union[str, Any]:
UpperCAmelCase : int = hans_processors[task]()
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Any = label_list
UpperCAmelCase : str = processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
UpperCAmelCase : int = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
lowercase_ , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return self.dataset
def __len__( self : Tuple ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
return self.label_list
class A_ ( _snake_case ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int , lowercase_ : Optional[int] ) -> Any:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_train_set.txt' ) ) , 'train' )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> List[str]:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
return ["contradiction", "entailment", "neutral"]
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str ) -> Dict:
UpperCAmelCase : Union[str, Any] = []
for i, line in enumerate(lowercase_ ):
if i == 0:
continue
UpperCAmelCase : Tuple = '%s-%s' % (set_type, line[0])
UpperCAmelCase : Tuple = line[5]
UpperCAmelCase : Dict = line[6]
UpperCAmelCase : Optional[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7]
UpperCAmelCase : Optional[Any] = line[0]
examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
return examples
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
UpperCAmelCase : List[str] = {label: i for i, label in enumerate(UpperCAmelCase_ )}
UpperCAmelCase : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase_ ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d' % (ex_index) )
UpperCAmelCase : int = tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , truncation=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , )
UpperCAmelCase : List[str] = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase : Any = int(example.pairID )
features.append(InputFeatures(**UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
lowercase__ = {
"hans": 3,
}
lowercase__ = {
"hans": HansProcessor,
}
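# The RoBERTa label-swap HACK above, in isolation: MNLI-pretrained RoBERTa
# checkpoints order the labels differently, so positions 1 and 2 of
# ["contradiction", "entailment", "neutral"] are exchanged before mapping.
demo_labels = ['contradiction', 'entailment', 'neutral']
demo_labels[1], demo_labels[2] = demo_labels[2], demo_labels[1]
assert demo_labels == ['contradiction', 'neutral', 'entailment']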
| 695 | 1 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = FlaxAutoencoderKL
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
UpperCAmelCase : Dict = 4
UpperCAmelCase : Dict = 3
UpperCAmelCase : List[Any] = (32, 32)
UpperCAmelCase : Dict = jax.random.PRNGKey(0 )
UpperCAmelCase : int = jax.random.uniform(lowercase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : Tuple = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
UpperCAmelCase : str = self.dummy_input
return init_dict, inputs_dict
| 695 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10_00 ):
UpperCAmelCase , UpperCAmelCase : Any = 1, 1
UpperCAmelCase : Any = []
for i in range(1 , n + 1 ):
UpperCAmelCase : Tuple = prev_numerator + 2 * prev_denominator
UpperCAmelCase : Any = prev_numerator + prev_denominator
if len(str(numerator ) ) > len(str(denominator ) ):
result.append(i )
UpperCAmelCase : Dict = numerator
UpperCAmelCase : Dict = denominator
return len(result )
if __name__ == "__main__":
print(f'''{solution() = }''')
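# A cross-check of the same digit-length count using exact fractions: the
# k-th expansion of sqrt(2) is 1 + 1/(1 + previous), starting from 3/2, and
# the eighth expansion 1393/985 is the first whose numerator gains a digit.
from fractions import Fraction
def count_longer_numerators(n: int = 1_000) -> int:
    expansion, hits = Fraction(3, 2), 0
    for _ in range(n):
        if len(str(expansion.numerator)) > len(str(expansion.denominator)):
            hits += 1
        expansion = 1 + 1 / (1 + expansion)
    return hits
assert count_longer_numerators(8) == 1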
| 695 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase__ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 695 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10_00 ):
UpperCAmelCase : List[Any] = 2**power
UpperCAmelCase : List[Any] = 0
while n:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
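# A one-line cross-check with str/sum; e.g. 2**15 = 32768 and 3+2+7+6+8 = 26.
def digit_sum(power: int) -> int:
    return sum(int(digit) for digit in str(2**power))
assert digit_sum(15) == 26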
| 695 | 1 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowercase__ = 2
class A_ :
'''simple docstring'''
def __init__( self : Any , *, # begin keyword-only arguments
lowercase_ : List[Any]="<s>" , lowercase_ : List[str]="<pad>" , lowercase_ : Union[str, Any]="</s>" , lowercase_ : Optional[int]="<unk>" , lowercase_ : Optional[Any]=None , ) -> List[str]:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = bos, unk, pad, eos
UpperCAmelCase : str = []
UpperCAmelCase : int = []
UpperCAmelCase : str = {}
UpperCAmelCase : Any = self.add_symbol(lowercase_ )
UpperCAmelCase : Tuple = self.add_symbol(lowercase_ )
UpperCAmelCase : Tuple = self.add_symbol(lowercase_ )
UpperCAmelCase : Tuple = self.add_symbol(lowercase_ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(lowercase_ )
UpperCAmelCase : str = len(self.symbols )
def __eq__( self : str , lowercase_ : int ) -> Union[str, Any]:
return self.indices == other.indices
def __getitem__( self : Optional[Any] , lowercase_ : Union[str, Any] ) -> Tuple:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : List[Any] ) -> Optional[int]:
return len(self.symbols )
def __contains__( self : Tuple , lowercase_ : Dict ) -> Union[str, Any]:
return sym in self.indices
@classmethod
def UpperCAmelCase_ ( cls : int , lowercase_ : Any ) -> List[Any]:
UpperCAmelCase : Union[str, Any] = cls()
d.add_from_file(lowercase_ )
return d
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple=1 , lowercase_ : List[str]=False ) -> List[str]:
if word in self.indices and not overwrite:
UpperCAmelCase : Dict = self.indices[word]
UpperCAmelCase : str = self.count[idx] + n
return idx
else:
UpperCAmelCase : List[str] = len(self.symbols )
UpperCAmelCase : Dict = idx
self.symbols.append(lowercase_ )
self.count.append(lowercase_ )
return idx
def UpperCAmelCase_ ( self : Any , lowercase_ : Any ) -> Optional[int]:
return 0
def UpperCAmelCase_ ( self : Dict , lowercase_ : Optional[Any] ) -> Any:
if isinstance(lowercase_ , lowercase_ ):
try:
with open(lowercase_ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(lowercase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(lowercase_ ) )
return
UpperCAmelCase : str = f.readlines()
UpperCAmelCase : Tuple = self._load_meta(lowercase_ )
for line in lines[indices_start_line:]:
try:
UpperCAmelCase , UpperCAmelCase : Any = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
UpperCAmelCase : int = True
UpperCAmelCase , UpperCAmelCase : str = line.rsplit(' ' , 1 )
else:
UpperCAmelCase : List[Any] = False
UpperCAmelCase : Optional[Any] = int(lowercase_ )
UpperCAmelCase : Optional[int] = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(lowercase_ ) )
self.add_symbol(lowercase_ , n=lowercase_ , overwrite=lowercase_ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def UpperCamelCase( UpperCAmelCase_ ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
UpperCAmelCase : str = dict((re.sub(R'@@$' , '' , k ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , k ), v) for k, v in d.items() )
UpperCAmelCase : str = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
UpperCAmelCase : Any = d[k] # restore
return da
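# The key rewrite above in isolation: fairseq BPE marks word-internal pieces
# with a trailing "@@", while the HF-style vocab instead marks word endings
# with "</w>"; dropping one marker and adding the other converts between them.
def demo_rewrite(vocab):
    return {
        (re.sub(R'@@$', '', k) if k.endswith('@@') else k + '</w>'): v
        for k, v in vocab.items()
    }
assert demo_rewrite({'le@@': 5, 'tt@@': 6, 'er': 7}) == {'le': 5, 'tt': 6, 'er</w>': 7}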
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
# prep
if not os.path.exists(UpperCAmelCase_ ):
raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
UpperCAmelCase : Dict = os.path.join(UpperCAmelCase_ , 'checkpoint.pt' )
if not os.path.isfile(UpperCAmelCase_ ):
raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
UpperCAmelCase : str = torch.load(UpperCAmelCase_ , map_location='cpu' )
UpperCAmelCase : List[Any] = chkpt['cfg']['model']
# dicts
UpperCAmelCase : Any = os.path.join(UpperCAmelCase_ , 'dict.txt' )
if not os.path.isfile(UpperCAmelCase_ ):
raise ValueError(F"""path to the file {dict_file} does not exist!""" )
UpperCAmelCase : List[str] = Dictionary.load(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = rewrite_dict_keys(src_dict.indices )
UpperCAmelCase : Dict = len(UpperCAmelCase_ )
UpperCAmelCase : int = os.path.join(UpperCAmelCase_ , VOCAB_FILES_NAMES['vocab_file'] )
print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_ , indent=UpperCAmelCase_ ) )
# merges_file (bpecodes)
UpperCAmelCase : Optional[int] = os.path.join(UpperCAmelCase_ , 'bpecodes' )
if not os.path.isfile(UpperCAmelCase_ ):
raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
UpperCAmelCase : Any = os.path.join(UpperCAmelCase_ , VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(UpperCAmelCase_ , UpperCAmelCase_ )
# model config
UpperCAmelCase : List[Any] = os.path.join(UpperCAmelCase_ , 'config.json' )
UpperCAmelCase : Tuple = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""" )
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_ , indent=UpperCAmelCase_ ) )
# tokenizer config
UpperCAmelCase : Dict = os.path.join(UpperCAmelCase_ , TOKENIZER_CONFIG_FILE )
UpperCAmelCase : int = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 10_24,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F"""Generating {biogpt_tokenizer_config_file}""" )
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_ , indent=UpperCAmelCase_ ) )
# model
UpperCAmelCase : str = chkpt['model']
# remove unneeded keys
UpperCAmelCase : Optional[Any] = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : str = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
UpperCAmelCase : Union[str, Any] = model_state_dict.pop(UpperCAmelCase_ )
else:
UpperCAmelCase : Union[str, Any] = model_state_dict.pop(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = BioGptConfig.from_pretrained(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = BioGptForCausalLM(UpperCAmelCase_ )
# check that it loads ok
model_new.load_state_dict(UpperCAmelCase_ )
# save
UpperCAmelCase : Tuple = os.path.join(UpperCAmelCase_ , WEIGHTS_NAME )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(UpperCAmelCase_ , UpperCAmelCase_ )
print('Conversion is done!' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--biogpt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase__ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = """blip_2_vision_model"""
def __init__( self : List[str] , lowercase_ : int=1_408 , lowercase_ : Tuple=6_144 , lowercase_ : Dict=39 , lowercase_ : Optional[int]=16 , lowercase_ : str=224 , lowercase_ : Any=14 , lowercase_ : int="gelu" , lowercase_ : int=0.0_0001 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=1E-10 , lowercase_ : List[str]=True , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
super().__init__(**lowercase_ )
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : str = patch_size
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = qkv_bias
@classmethod
def UpperCAmelCase_ ( cls : List[str] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip_2_qformer"""
def __init__( self : Tuple , lowercase_ : Union[str, Any]=30_522 , lowercase_ : int=768 , lowercase_ : Dict=12 , lowercase_ : Dict=12 , lowercase_ : int=3_072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=1E-12 , lowercase_ : str=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : Optional[int]=2 , lowercase_ : str=1_408 , **lowercase_ : Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = layer_norm_eps
UpperCAmelCase : Dict = position_embedding_type
UpperCAmelCase : Any = cross_attention_frequency
UpperCAmelCase : Any = encoder_hidden_size
@classmethod
def UpperCAmelCase_ ( cls : List[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip-2"""
UpperCAmelCase_ : Any = True
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : Dict=32 , **lowercase_ : Union[str, Any] ) -> Any:
super().__init__(**lowercase_ )
if vision_config is None:
UpperCAmelCase : Union[str, Any] = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
UpperCAmelCase : int = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
UpperCAmelCase : Dict = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
UpperCAmelCase : str = BlipaVisionConfig(**lowercase_ )
UpperCAmelCase : str = BlipaQFormerConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
UpperCAmelCase : int = CONFIG_MAPPING[text_model_type](**lowercase_ )
UpperCAmelCase : Optional[int] = self.text_config.tie_word_embeddings
UpperCAmelCase : Dict = self.text_config.is_encoder_decoder
UpperCAmelCase : Tuple = num_query_tokens
UpperCAmelCase : Tuple = self.vision_config.hidden_size
UpperCAmelCase : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase : Union[str, Any] = 1.0
UpperCAmelCase : Union[str, Any] = 0.02
@classmethod
def UpperCAmelCase_ ( cls : Dict , lowercase_ : BlipaVisionConfig , lowercase_ : BlipaQFormerConfig , lowercase_ : PretrainedConfig , **lowercase_ : List[Any] , ) -> Tuple:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
UpperCAmelCase : Optional[int] = self.qformer_config.to_dict()
UpperCAmelCase : List[str] = self.text_config.to_dict()
UpperCAmelCase : str = self.__class__.model_type
return output
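# Minimal composition sketch, assuming the public transformers names
# (Blip2Config, Blip2VisionConfig, Blip2QFormerConfig, OPTConfig) for the
# anonymized classes above:
#   from transformers import Blip2Config, Blip2VisionConfig, Blip2QFormerConfig, OPTConfig
#   config = Blip2Config.from_vision_qformer_text_configs(
#       vision_config=Blip2VisionConfig(),
#       qformer_config=Blip2QFormerConfig(),
#       text_config=OPTConfig(),
#   )
#   config.to_dict()  # serializes the nested sub-configs, mirroring the method above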
| 695 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"gpt-neox-20b": 2048,
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowercase_ : Any=None , lowercase_ : Dict=None , lowercase_ : List[str]=None , lowercase_ : List[Any]="<|endoftext|>" , lowercase_ : List[str]="<|endoftext|>" , lowercase_ : Any="<|endoftext|>" , lowercase_ : List[str]=False , **lowercase_ : Union[str, Any] , ) -> str:
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
UpperCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase_ ) != add_prefix_space:
UpperCAmelCase : Tuple = getattr(lowercase_ , pre_tok_state.pop('type' ) )
UpperCAmelCase : Optional[Any] = add_prefix_space
UpperCAmelCase : Tuple = pre_tok_class(**lowercase_ )
UpperCAmelCase : Any = add_prefix_space
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : Optional[int] = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : "Conversation" ) -> List[int]:
UpperCAmelCase : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_ ) + [self.eos_token_id] )
if len(lowercase_ ) > self.model_max_length:
UpperCAmelCase : int = input_ids[-self.model_max_length :]
return input_ids
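# Usage sketch with the released checkpoint, assuming this is the GPT-NeoX
# fast tokenizer exposed by the public transformers API:
#   from transformers import GPTNeoXTokenizerFast
#   tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   tok("Hello world")["input_ids"]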
| 695 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowercase__ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
lowercase__ = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict , lowercase_ : Dict ) -> Tuple:
return FSMTTokenizer.from_pretrained(lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> Tuple:
UpperCAmelCase : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(lowercase_ ).to(lowercase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def UpperCAmelCase_ ( self : List[str] , lowercase_ : int , lowercase_ : Any ) -> Optional[int]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
UpperCAmelCase : List[str] = f"""facebook/wmt19-{pair}"""
UpperCAmelCase : Optional[int] = self.get_tokenizer(lowercase_ )
UpperCAmelCase : int = self.get_model(lowercase_ )
UpperCAmelCase : List[Any] = bleu_data[pair]['src']
UpperCAmelCase : Optional[int] = bleu_data[pair]['tgt']
UpperCAmelCase : Any = tokenizer(lowercase_ , return_tensors='pt' , truncation=lowercase_ , padding='longest' ).to(lowercase_ )
UpperCAmelCase : List[Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase : List[Any] = tokenizer.batch_decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
UpperCAmelCase : Any = calculate_bleu(lowercase_ , lowercase_ )
print(lowercase_ )
self.assertGreaterEqual(scores['bleu'] , lowercase_ )
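# Standalone translation sketch mirroring what the test above exercises
# (public transformers API; the model id is one of the pairs tested):
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#   mname = "facebook/wmt19-en-de"
#   tok = FSMTTokenizer.from_pretrained(mname)
#   model = FSMTForConditionalGeneration.from_pretrained(mname)
#   batch = tok(["Machine learning is great."], return_tensors="pt")
#   out = model.generate(**batch, num_beams=8)
#   print(tok.batch_decode(out, skip_special_tokens=True))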
| 695 | 1 |
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = DownBlockaD # noqa F405
UpperCAmelCase_ : Any = """down"""
def UpperCAmelCase_ ( self : str ) -> Any:
UpperCAmelCase : Any = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : int = ResnetDownsampleBlockaD # noqa F405
UpperCAmelCase_ : int = """down"""
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
UpperCAmelCase : Optional[int] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = AttnDownBlockaD # noqa F405
UpperCAmelCase_ : str = """down"""
def UpperCAmelCase_ ( self : int ) -> Tuple:
UpperCAmelCase : List[Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = CrossAttnDownBlockaD # noqa F405
UpperCAmelCase_ : List[Any] = """down"""
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : Optional[int] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : Any = 32
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
UpperCAmelCase : Tuple = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : int = SimpleCrossAttnDownBlockaD # noqa F405
UpperCAmelCase_ : List[Any] = """down"""
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
return super().get_dummy_input(include_encoder_hidden_states=lowercase_ )
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase , UpperCAmelCase : Optional[int] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : int = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase : Union[str, Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = SkipDownBlockaD # noqa F405
UpperCAmelCase_ : Optional[Any] = """down"""
@property
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
return super().get_dummy_input(include_skip_sample=lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
UpperCAmelCase : Optional[int] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Any = AttnSkipDownBlockaD # noqa F405
UpperCAmelCase_ : List[str] = """down"""
@property
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
return super().get_dummy_input(include_skip_sample=lowercase_ )
def UpperCAmelCase_ ( self : str ) -> int:
UpperCAmelCase : int = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = DownEncoderBlockaD # noqa F405
UpperCAmelCase_ : Union[str, Any] = """down"""
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
return super().get_dummy_input(include_temb=lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
UpperCAmelCase : Tuple = {
'in_channels': 32,
'out_channels': 32,
}
UpperCAmelCase : Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase : int = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = AttnDownEncoderBlockaD # noqa F405
UpperCAmelCase_ : Optional[int] = """down"""
@property
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
return super().get_dummy_input(include_temb=lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
UpperCAmelCase : str = {
'in_channels': 32,
'out_channels': 32,
}
UpperCAmelCase : Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : str ) -> int:
UpperCAmelCase : Any = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = UNetMidBlockaD # noqa F405
UpperCAmelCase_ : Optional[int] = """mid"""
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = {
'in_channels': 32,
'temb_channels': 128,
}
UpperCAmelCase : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Any ) -> str:
UpperCAmelCase : Optional[int] = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = UNetMidBlockaDCrossAttn # noqa F405
UpperCAmelCase_ : Union[str, Any] = """mid"""
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase : List[str] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : str ) -> str:
UpperCAmelCase : str = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = UNetMidBlockaDSimpleCrossAttn # noqa F405
UpperCAmelCase_ : Any = """mid"""
@property
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
return super().get_dummy_input(include_encoder_hidden_states=lowercase_ )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
UpperCAmelCase , UpperCAmelCase : Any = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : Any = 32
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = UpBlockaD # noqa F405
UpperCAmelCase_ : List[str] = """up"""
@property
def UpperCAmelCase_ ( self : Any ) -> str:
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
UpperCAmelCase : List[Any] = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ResnetUpsampleBlockaD # noqa F405
UpperCAmelCase_ : List[Any] = """up"""
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : int = CrossAttnUpBlockaD # noqa F405
UpperCAmelCase_ : Tuple = """up"""
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> Any:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Tuple ) -> int:
UpperCAmelCase : int = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = SimpleCrossAttnUpBlockaD # noqa F405
UpperCAmelCase_ : Union[str, Any] = """up"""
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ , include_encoder_hidden_states=lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase : Dict = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : str = 32
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
UpperCAmelCase : Any = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = AttnUpBlockaD # noqa F405
UpperCAmelCase_ : str = """up"""
@property
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
UpperCAmelCase : List[str] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Any = SkipUpBlockaD # noqa F405
UpperCAmelCase_ : Union[str, Any] = """up"""
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = AttnSkipUpBlockaD # noqa F405
UpperCAmelCase_ : Union[str, Any] = """up"""
@property
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCAmelCase_ ( self : int ) -> str:
UpperCAmelCase : str = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = UpDecoderBlockaD # noqa F405
UpperCAmelCase_ : str = """up"""
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
return super().get_dummy_input(include_temb=lowercase_ )
def UpperCAmelCase_ ( self : str ) -> int:
UpperCAmelCase : Any = {'in_channels': 32, 'out_channels': 32}
UpperCAmelCase : str = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Any ) -> Dict:
UpperCAmelCase : Union[str, Any] = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = AttnUpDecoderBlockaD # noqa F405
UpperCAmelCase_ : Union[str, Any] = """up"""
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
return super().get_dummy_input(include_temb=lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : List[Any] = {'in_channels': 32, 'out_channels': 32}
UpperCAmelCase : Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase : Tuple = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(lowercase_ )
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = d_kv
UpperCAmelCase : Any = d_ff
UpperCAmelCase : List[str] = num_layers
UpperCAmelCase : str = num_heads
UpperCAmelCase : List[Any] = relative_attention_num_buckets
UpperCAmelCase : Tuple = relative_attention_max_distance
UpperCAmelCase : str = dropout_rate
UpperCAmelCase : Optional[int] = layer_norm_epsilon
UpperCAmelCase : int = initializer_factor
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : Union[str, Any] = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase : List[str] = dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = """pix2struct_vision_model"""
def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Any = patch_embed_hidden_size
UpperCAmelCase : Optional[int] = d_ff
UpperCAmelCase : Dict = dropout_rate
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : str = initializer_factor
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Union[str, Any] = dense_act_fn
UpperCAmelCase : Dict = seq_len
UpperCAmelCase : Optional[int] = relative_attention_num_buckets
UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
UpperCAmelCase : str = d_kv
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """pix2struct"""
UpperCAmelCase_ : Dict = True
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
if text_config is None:
UpperCAmelCase : Optional[int] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
UpperCAmelCase : str = self.text_config.pad_token_id
UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
UpperCAmelCase : Union[str, Any] = initializer_factor
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : str = is_vqa
@classmethod
def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.text_config.to_dict()
UpperCAmelCase : Dict = self.vision_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
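# Composition sketch, assuming the public transformers names
# (Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig) for the
# anonymized classes above:
#   from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig
#   config = Pix2StructConfig.from_text_vision_configs(
#       Pix2StructTextConfig(), Pix2StructVisionConfig()
#   )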
| 695 | 1 |
'''simple docstring'''
def UpperCamelCase( density , bulk_modulus ):
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
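# Worked example (illustrative values, roughly water at 20 °C): with a bulk
# modulus of about 2.15e9 Pa and a density of about 998 kg/m^3, the formula
# c = sqrt(K / rho) gives
#   UpperCamelCase(998, 2.15e9)   # ~1467.8 m/s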
| 695 |
'''simple docstring'''
import baseaa
def baseaa_encode( string ):
    return baseaa.baaencode(string.encode('utf-8' ) )
def baseaa_decode( encoded ):
    return baseaa.baadecode(encoded ).decode('utf-8' )
if __name__ == "__main__":
lowercase__ = "Hello World!"
lowercase__ = baseaa_encode(test)
print(encoded)
lowercase__ = baseaa_decode(encoded)
print(decoded)
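# Note: `baseaa` / `baaencode` look like an anonymized stdlib `base64` helper
# (the exact bNNencode variant is not recoverable from this snippet), so the
# round trip should print the encoded bytes followed by the original
# "Hello World!".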
| 695 | 1 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video( url ):
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url ).json()[0]['urls'][0]['src']
    return requests.get(video_url ).content
if __name__ == "__main__":
lowercase__ = input("Enter Video/IGTV url: ").strip()
lowercase__ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
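# Caveat: this depends on a third-party endpoint (downloadgram.net) and on the
# JSON shape [0]['urls'][0]['src'] it returned when the script was written;
# either may change or disappear, so expect to update the parsing accordingly.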
| 695 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean( input_a , input_b ):
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search( dataset , value_array ):
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape' )
    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity( input_a , input_b ):
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
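# Minimal call sketch with the names restored above:
#   import numpy as np
#   dataset = np.array([[0, 0], [1, 1], [2, 2]])
#   value_array = np.array([[0, 1]])
#   similarity_search(dataset, value_array)                 # -> [[[0, 0], 1.0]]
#   cosine_similarity(np.array([1, 0]), np.array([0, 1]))   # -> 0.0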
| 695 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Dict = """megatron-bert"""
def __init__( self : List[Any] , lowercase_ : Tuple=29_056 , lowercase_ : Any=1_024 , lowercase_ : int=24 , lowercase_ : Optional[int]=16 , lowercase_ : List[str]=4_096 , lowercase_ : str="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[Any]=2 , lowercase_ : Any=0.02 , lowercase_ : str=1E-12 , lowercase_ : Any=0 , lowercase_ : Tuple="absolute" , lowercase_ : Tuple=True , **lowercase_ : Any , ) -> Any:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase : Dict = vocab_size
UpperCAmelCase : Dict = hidden_size
UpperCAmelCase : Tuple = num_hidden_layers
UpperCAmelCase : Union[str, Any] = num_attention_heads
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : int = hidden_dropout_prob
UpperCAmelCase : Any = attention_probs_dropout_prob
UpperCAmelCase : Optional[int] = max_position_embeddings
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : List[Any] = layer_norm_eps
UpperCAmelCase : Dict = position_embedding_type
UpperCAmelCase : List[str] = use_cache
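# Instantiation sketch, assuming this is the MegatronBertConfig exposed by
# the public transformers API:
#   from transformers import MegatronBertConfig
#   config = MegatronBertConfig(hidden_size=1_024, num_hidden_layers=24)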
| 695 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
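# The registration pattern these tests exercise, in plain form (public
# transformers API; CustomConfig / CustomFeatureExtractor are the test
# fixtures imported at the top of this file):
#   from transformers import AutoConfig, AutoFeatureExtractor
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   fe = AutoFeatureExtractor.from_pretrained(saved_dir)  # resolves to CustomFeatureExtractor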
| 695 | 1 |
'''simple docstring'''
import math
def fx( x , a ):
    return math.pow(x , 2 ) - a
def fx_derivative( x ):
    return 2 * x
def get_initial_point( a ):
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def square_root_iterative( a , max_iter = 99_99 , tolerance = 0.00_0000_0000_0001 ):
    if a < 0:
        raise ValueError('math domain error' )
    value = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        value = value - fx(value , a ) / fx_derivative(value )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
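# Quick sanity check of the Newton iteration restored above:
#   square_root_iterative(2)   # ~1.4142135623730951
#   square_root_iterative(9)   # converges to 3.0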
| 695 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video( url ):
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url ).json()[0]['urls'][0]['src']
    return requests.get(video_url ).content
if __name__ == "__main__":
lowercase__ = input("Enter Video/IGTV url: ").strip()
lowercase__ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
def comp_and_swap( array , index_a , index_b , direction ):
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a] , array[index_b] = array[index_b] , array[index_a]
def bitonic_merge( array , low , length , direction ):
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array , low , length , direction ):
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
lowercase__ = input("Enter numbers separated by a comma:\n").strip()
lowercase__ = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
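# Note: bitonic sort requires the input length to be a power of two;
# direction 1 sorts ascending, 0 descending. For example:
#   data = [12, 42, -21, 1]
#   bitonic_sort(data, 0, len(data), 1)   # data -> [-21, 1, 12, 42]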
| 695 |
'''simple docstring'''
def solution( max_perimeter = 10**9 ):
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
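# Context: this sums the perimeters of almost-equilateral triangles
# (sides a, a, a +/- 1) with integral side lengths and area, up to
# max_perimeter -- Project Euler problem 94. The Pell-style recurrence on
# value/prev_value generates the side lengths; the first perimeters it
# produces are 16, 50, 196, ...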
| 695 | 1 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCamelCase( ):
raise RuntimeError('CUDA out of memory.' )
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self : int ) -> Optional[Any]:
super().__init__()
UpperCAmelCase : Tuple = nn.Linear(3 , 4 )
UpperCAmelCase : Optional[Any] = nn.BatchNormad(4 )
UpperCAmelCase : Any = nn.Linear(4 , 5 )
def UpperCAmelCase_ ( self : Any , lowercase_ : Dict ) -> Optional[int]:
return self.lineara(self.batchnorm(self.lineara(lowercase_ ) ) )
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase_ : Tuple ):
nonlocal batch_sizes
batch_sizes.append(lowercase_ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowercase_ , [128, 64, 32, 16, 8] )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
UpperCAmelCase : Tuple = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase_ : List[Any] , lowercase_ : Dict ):
nonlocal batch_sizes
batch_sizes.append(lowercase_ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCAmelCase , UpperCAmelCase : str = mock_training_loop_function('hello' )
self.assertListEqual(lowercase_ , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowercase_ : List[str] ):
pass
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCAmelCase_ ( self : Any ) -> Any:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase_ : Any ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCAmelCase_ ( self : Dict ) -> int:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase_ : int , lowercase_ : int , lowercase_ : List[str] ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function(128 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase_ : List[str] ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
UpperCAmelCase : List[str] = torch.cuda.memory_allocated()
UpperCAmelCase : Tuple = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowercase_ )
UpperCAmelCase : List[str] = release_memory(lowercase_ )
self.assertEqual(torch.cuda.memory_allocated() , lowercase_ )
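# Typical real-world use of the utility exercised above (public accelerate API):
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_loop(batch_size):
#       ...  # build dataloaders/model from `batch_size`
#
#   training_loop()  # on CUDA OOM, retries with the batch size halved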
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : str = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
UpperCAmelCase : Any = self.dummy_uncond_unet
UpperCAmelCase : Tuple = DDIMScheduler()
UpperCAmelCase : Optional[Any] = self.dummy_vq_model
UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
        UpperCAmelCase : Optional[int] = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
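# Minimal generation sketch with the same public pipeline (diffusers API):
#   from diffusers import LDMPipeline
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]
#   image.save("ldm_sample.png")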
| 695 | 1 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Tuple = []
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
for v in tree.values():
shapes.extend(_fetch_dims(UpperCAmelCase_ ) )
elif isinstance(UpperCAmelCase_ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(UpperCAmelCase_ ) )
elif isinstance(UpperCAmelCase_ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : List[str] = []
for d in reversed(UpperCAmelCase_ ):
idx.append(flat_idx % d )
UpperCAmelCase : Tuple = flat_idx // d
return tuple(reversed(UpperCAmelCase_ ) )
@torch.jit.ignore
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , ):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(UpperCAmelCase_ ) -> None:
UpperCAmelCase : str = True
for i in range(len(UpperCAmelCase_ ) ):
UpperCAmelCase : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCAmelCase : int = l[reversed_idx]
if start_edges is None:
UpperCAmelCase : Union[str, Any] = [s == 0 for s in start]
reduce_edge_list(UpperCAmelCase_ )
if end_edges is None:
UpperCAmelCase : List[str] = [e == (d - 1) for e, d in zip(UpperCAmelCase_ , UpperCAmelCase_ )]
reduce_edge_list(UpperCAmelCase_ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(UpperCAmelCase_ ) == 0:
return [()]
elif len(UpperCAmelCase_ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
UpperCAmelCase : List[Tuple[slice, ...]] = []
UpperCAmelCase : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
if s == e:
path_list.append(slice(UpperCAmelCase_ , s + 1 ) )
else:
break
UpperCAmelCase : Tuple[slice, ...] = tuple(UpperCAmelCase_ )
UpperCAmelCase : Tuple = len(UpperCAmelCase_ )
# start == end, and we're done
if divergence_idx == len(UpperCAmelCase_ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase : int = start[divergence_idx]
return tuple(
path + (slice(UpperCAmelCase_ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase : List[str] = end[divergence_idx]
return tuple(
path + (slice(UpperCAmelCase_ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
UpperCAmelCase : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Dict = t.shape[:no_batch_dims]
UpperCAmelCase : int = list(_flat_idx_to_idx(UpperCAmelCase_ , UpperCAmelCase_ ) )
# _get_minimal_slice_set is inclusive
UpperCAmelCase : Optional[int] = list(_flat_idx_to_idx(flat_end - 1 , UpperCAmelCase_ ) )
# Get an ordered list of slices to perform
UpperCAmelCase : Any = _get_minimal_slice_set(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
UpperCAmelCase : str = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def chunk_layer( layer , inputs , chunk_size , no_batch_dims , low_mem = False , _out = None , _add_into_out = False , ):
    if not (len(inputs ) > 0):
        raise ValueError('Must provide at least one input' )
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs )]
    orig_batch_dims = tuple([max(s ) for s in zip(*initial_dims )] )
    def _prep_inputs(t ) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            t = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t
    prepped_inputs = tensor_tree_map(_prep_inputs , inputs )
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t ) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    i = 0
    out = prepped_outputs
    for _ in range(no_chunks ):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice , flat_start=i , flat_end=min(flat_batch_dim , i + chunk_size ) , no_batch_dims=len(orig_batch_dims ) , )
        chunks = tensor_tree_map(select_chunk , prepped_inputs )
        # Run the layer on the chunk
        output_chunk = layer(**chunks )
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , output_chunk )
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk , dict ):
            def assign(da , db ) -> None:
                for k, v in da.items():
                    if isinstance(v , dict ):
                        assign(v , db[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += db[k]
                        else:
                            v[i : i + chunk_size] = db[k]
            assign(out , output_chunk )
        elif isinstance(output_chunk , tuple ):
            for xa, xb in zip(out , output_chunk ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xb
                else:
                    xa[i : i + chunk_size] = xb
        elif isinstance(output_chunk , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('Not supported' )
        i += chunk_size
    out = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , out )
    return out
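# Usage sketch (hedged, independent of the helpers above): chunked application
# must match a single full-batch call up to floating point, which is the
# contract chunk_layer relies on:
def _demo_chunked_apply() -> None:
    import torch
    layer = torch.nn.Linear(8 , 8 )
    x = torch.randn(6 , 5 , 8 ) # batch dims (6, 5), flattened to 30 rows
    flat = x.reshape(-1 , 8 )
    chunks = [layer(flat[i : i + 7] ) for i in range(0 , flat.shape[0] , 7 )]
    out = torch.cat(chunks ).view(6 , 5 , 8 )
    assert torch.allclose(out , layer(x ) , atol=1E-6 )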
class A_ :
'''simple docstring'''
    def __init__( self , max_chunk_size : int = 512 ) -> None:
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size : Optional[int] = None
        self.cached_arg_data : Optional[tuple] = None
    def _determine_favorable_chunk_size( self , fn : Callable , args : tuple , min_chunk_size : int ) -> int:
        logging.info('Tuning chunk size...' )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(chunk_size : int ) -> bool:
            try:
                with torch.no_grad():
                    fn(*args , chunk_size=chunk_size )
                return True
            except RuntimeError:
                return False
        min_viable_chunk_size_index = 0
        i = len(candidates ) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i] )
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates ) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches( self , ac_a : Iterable , ac_b : Iterable ) -> bool:
        consistent = True
        for aa, ab in zip(ac_a , ac_b ):
            assert type(aa ) == type(ab )
            if isinstance(aa , (list, tuple) ):
                consistent &= self._compare_arg_caches(aa , ab )
            elif isinstance(aa , dict ):
                aa_items = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
                ab_items = [v for _, v in sorted(ab.items() , key=lambda x : x[0] )]
                consistent &= self._compare_arg_caches(aa_items , ab_items )
            else:
                consistent &= aa == ab
        return consistent
    def tune_chunk_size( self , representative_fn : Callable , args : tuple , min_chunk_size : int ) -> int:
        consistent = True
        arg_data : tuple = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , args , object )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data ) == len(arg_data )
            consistent = self._compare_arg_caches(self.cached_arg_data , arg_data )
        else:
            # No cache yet, so tuning must run on this first call
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn , args , min_chunk_size , )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
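# Illustration (a sketch of the tuning strategy above, with a toy "fits in
# memory" predicate standing in for the real chunked forward pass):
def _demo_chunk_size_candidates(max_chunk_size : int = 512 , min_chunk_size : int = 16 ) -> int:
    candidates = [2**l for l in range(int(math.log(max_chunk_size , 2 ) ) + 1 )]
    candidates = [min_chunk_size] + [c for c in candidates if c > min_chunk_size]
    candidates[-1] += 4 # mirror the slightly enlarged final probe used above
    def fits(c : int ) -> bool:
        return c <= 200 # hypothetical memory limit
    return max(c for c in candidates if fits(c ) ) # 128 with these numbers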
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
UpperCAmelCase : Dict = self.dummy_uncond_unet
UpperCAmelCase : Dict = KarrasVeScheduler()
UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
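# Usage sketch (hedged): outside a test, the slow path above reduces to the
# following; the checkpoint name is taken from the test itself:
# unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
# pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler()).to("cuda")
# image = pipe(num_inference_steps=20, generator=torch.manual_seed(0)).images[0]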
| 695 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """whisper"""
UpperCAmelCase_ : Tuple = ["""past_key_values"""]
UpperCAmelCase_ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str , lowercase_ : Any=51_865 , lowercase_ : List[Any]=80 , lowercase_ : int=6 , lowercase_ : Dict=4 , lowercase_ : List[Any]=6 , lowercase_ : Any=4 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : List[Any]=50_257 , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : str="gelu" , lowercase_ : List[str]=256 , lowercase_ : str=0.0 , lowercase_ : Any=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]=1_500 , lowercase_ : List[Any]=448 , lowercase_ : int=50_256 , lowercase_ : Union[str, Any]=50_256 , lowercase_ : List[Any]=50_256 , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=[220, 50_256] , lowercase_ : Tuple=False , lowercase_ : str=256 , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=0.05 , lowercase_ : Any=10 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=10 , lowercase_ : int=0 , lowercase_ : Optional[int]=7 , **lowercase_ : Union[str, Any] , ) -> List[str]:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Any = num_mel_bins
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : str = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_layers
UpperCAmelCase : Any = decoder_attention_heads
UpperCAmelCase : Tuple = decoder_ffn_dim
UpperCAmelCase : List[str] = encoder_ffn_dim
UpperCAmelCase : int = dropout
UpperCAmelCase : int = attention_dropout
UpperCAmelCase : List[Any] = activation_dropout
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Union[str, Any] = init_std
UpperCAmelCase : Dict = encoder_layerdrop
UpperCAmelCase : str = decoder_layerdrop
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : Tuple = max_source_positions
UpperCAmelCase : List[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase : Optional[int] = classifier_proj_size
UpperCAmelCase : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : Optional[Any] = apply_spec_augment
UpperCAmelCase : Optional[Any] = mask_time_prob
UpperCAmelCase : Optional[Any] = mask_time_length
UpperCAmelCase : str = mask_time_min_masks
UpperCAmelCase : List[str] = mask_feature_prob
UpperCAmelCase : Tuple = mask_feature_length
UpperCAmelCase : Optional[int] = mask_feature_min_masks
UpperCAmelCase : str = median_filter_width
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , suppress_tokens=lowercase_ , begin_suppress_tokens=lowercase_ , **lowercase_ , )
class A_ ( _snake_case ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase : Optional[int] = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
UpperCAmelCase : int = {0: 'batch'}
else:
UpperCAmelCase : List[str] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='inputs' )
return common_inputs
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 22_050 , lowercase_ : float = 5.0 , lowercase_ : int = 220 , ) -> Mapping[str, Any]:
UpperCAmelCase : Tuple = OrderedDict()
UpperCAmelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase_ , framework=lowercase_ , sampling_rate=lowercase_ , time_duration=lowercase_ , frequency=lowercase_ , )
UpperCAmelCase : Optional[Any] = encoder_inputs['input_features'].shape[2]
UpperCAmelCase : Tuple = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase : Optional[int] = super().generate_dummy_inputs(
preprocessor.tokenizer , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Dict = encoder_inputs.pop('input_features' )
UpperCAmelCase : List[str] = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase : Union[str, Any] = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def UpperCAmelCase_ ( self : Dict ) -> float:
return 1E-3
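# Usage sketch (assumption: the first class above mirrors transformers'
# WhisperConfig, whose real constructor accepts these keyword arguments):
# from transformers import WhisperConfig, WhisperForConditionalGeneration
# config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
# model = WhisperForConditionalGeneration(config)  # randomly initialised weights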
| 695 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = """autoformer"""
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Dict , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : str = "student_t" , lowercase_ : str = "nll" , lowercase_ : int = 1 , lowercase_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowercase_ : bool = True , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : Optional[List[int]] = None , lowercase_ : Optional[List[int]] = None , lowercase_ : int = 64 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 32 , lowercase_ : int = 32 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 100 , lowercase_ : float = 0.02 , lowercase_ : bool = True , lowercase_ : Union[str, Any]=True , lowercase_ : int = 10 , lowercase_ : int = 25 , lowercase_ : int = 3 , **lowercase_ : str , ) -> Dict:
# time series specific configuration
UpperCAmelCase : int = prediction_length
UpperCAmelCase : Optional[Any] = context_length if context_length is not None else prediction_length
UpperCAmelCase : List[Any] = distribution_output
UpperCAmelCase : Tuple = loss
UpperCAmelCase : Dict = input_size
UpperCAmelCase : Dict = num_time_features
UpperCAmelCase : Tuple = lags_sequence
UpperCAmelCase : str = scaling
UpperCAmelCase : Optional[int] = num_dynamic_real_features
UpperCAmelCase : List[str] = num_static_real_features
UpperCAmelCase : Optional[int] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : int = cardinality
else:
UpperCAmelCase : Union[str, Any] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : Any = embedding_dimension
else:
UpperCAmelCase : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase : Dict = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase : Optional[int] = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : Dict = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_attention_heads
UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase : str = decoder_ffn_dim
UpperCAmelCase : str = encoder_layers
UpperCAmelCase : Optional[Any] = decoder_layers
UpperCAmelCase : int = dropout
UpperCAmelCase : Any = attention_dropout
UpperCAmelCase : Tuple = activation_dropout
UpperCAmelCase : str = encoder_layerdrop
UpperCAmelCase : Union[str, Any] = decoder_layerdrop
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Dict = init_std
UpperCAmelCase : Union[str, Any] = use_cache
# Autoformer
UpperCAmelCase : Any = label_length
UpperCAmelCase : List[Any] = moving_average
UpperCAmelCase : Optional[Any] = autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def UpperCAmelCase_ ( self : List[str] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
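# Usage sketch (assumption: this mirrors transformers' AutoformerConfig):
# from transformers import AutoformerConfig, AutoformerForPrediction
# config = AutoformerConfig(prediction_length=24, context_length=48)
# model = AutoformerForPrediction(config)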
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def fetch_jobs( location : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location ).content , 'html.parser' )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('div' , attrs={'data-tn-component': 'organicJob'} ):
        job_title = job.find('a' , attrs={'data-tn-element': 'jobTitle'} ).text.strip()
        company_name = job.find('span' , {'class': 'company'} ).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 695 |
'''simple docstring'''
def UpperCamelCase( sentence , ngram_size ):
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
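# Example: character n-grams of size 2 slide one position at a time, so
# UpperCamelCase("I am a sentence", 2)[:3] == ["I ", " a", "am"]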
| 695 | 1 |
'''simple docstring'''
def jaccard_similarity( set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
lowercase__ = {"a", "b", "c", "d", "e"}
lowercase__ = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
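    # With these sets the intersection is {"c", "d", "e"} (3 elements) and the
    # union has 8 elements, so the printed similarity is 3 / 8 = 0.375.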
| 695 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_highpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_bandpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_allpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def make_peak( frequency : int , samplerate : int , gain_db : float , q_factor : float = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_lowshelf( frequency : int , samplerate : int , gain_db : float , q_factor : float = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_highshelf( frequency : int , samplerate : int , gain_db : float , q_factor : float = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
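# Usage sketch (hedged: assumes IIRFilter exposes the per-sample `process`
# method from the accompanying audio_filters.iir_filter module):
# filt = make_lowpass(1_000, 48_000)
# filtered = [filt.process(sample) for sample in samples]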
| 695 | 1 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
lowercase__ = "examples/"
lowercase__ = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
lowercase__ = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
lowercase__ = "README.md"
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
with open(UpperCAmelCase_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCAmelCase : Optional[Any] = f.read()
UpperCAmelCase , UpperCAmelCase : Optional[int] = REPLACE_PATTERNS[pattern]
UpperCAmelCase : Optional[int] = replace.replace('VERSION' , UpperCAmelCase_ )
UpperCAmelCase : Union[str, Any] = re_pattern.sub(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ ):
for folder, directories, fnames in os.walk(UpperCAmelCase_ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ , pattern='examples' )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if not patch:
update_version_in_examples(UpperCAmelCase_ )
def UpperCamelCase( ):
UpperCAmelCase : Any = '🤗 Transformers currently provides the following architectures'
UpperCAmelCase : int = '1. Want to contribute a new model?'
with open(UpperCAmelCase_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCAmelCase : Optional[int] = f.readlines()
# Find the start of the list.
UpperCAmelCase : Any = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase : str = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
UpperCAmelCase : List[str] = lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(UpperCAmelCase_ )
def UpperCamelCase( ):
with open(REPLACE_FILES['init'] , 'r' ) as f:
UpperCAmelCase : int = f.read()
UpperCAmelCase : Dict = REPLACE_PATTERNS['init'][0].search(UpperCAmelCase_ ).groups()[0]
return packaging.version.parse(UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_=False ):
UpperCAmelCase : Optional[int] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
UpperCAmelCase : Union[str, Any] = default_version.base_version
elif patch:
UpperCAmelCase : List[Any] = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
UpperCAmelCase : List[str] = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
UpperCAmelCase : str = input(F"""Which version are you releasing? [{default_version}]""" )
if len(UpperCAmelCase_ ) == 0:
UpperCAmelCase : int = default_version
print(F"""Updating version to {version}.""" )
global_version_update(UpperCAmelCase_ , patch=UpperCAmelCase_ )
def UpperCamelCase( ):
UpperCAmelCase : str = get_version()
UpperCAmelCase : int = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
UpperCAmelCase : Dict = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase : Optional[Any] = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(UpperCAmelCase_ ) == 0:
UpperCAmelCase : List[Any] = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(UpperCAmelCase_ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
lowercase__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
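# Typical invocations, matching the argparse flags above (script name illustrative):
#   python release.py                  # prepare a minor release
#   python release.py --patch          # prepare a patch release
#   python release.py --post_release   # move back to a .dev0 version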
| 695 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
class LRUCache( Generic[T] ):
    '''simple docstring'''
    dq_store : deque[T] # Cache store of keys
    key_reference : set[T] # References of the keys in cache
    _MAX_CAPACITY : int = 10 # Maximum capacity of cache
    def __init__( self , n : int ) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self , x : T ) -> None:
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self ) -> None:
        for k in self.dq_store:
            print(k )
    def __repr__( self ) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
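    # Trace of the demo above: after refer(5) the cache is full, so the least
    # recently used key (2) is evicted, leaving most- to least-recent [5, 4, 'A', 3].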
| 695 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """dpt"""
def __init__( self : Optional[int] , lowercase_ : Union[str, Any]=768 , lowercase_ : Tuple=12 , lowercase_ : List[str]=12 , lowercase_ : Tuple=3_072 , lowercase_ : int="gelu" , lowercase_ : Optional[int]=0.0 , lowercase_ : str=0.0 , lowercase_ : List[Any]=0.02 , lowercase_ : Optional[int]=1E-12 , lowercase_ : Any=384 , lowercase_ : List[Any]=16 , lowercase_ : Any=3 , lowercase_ : Dict=False , lowercase_ : Union[str, Any]=True , lowercase_ : str=[2, 5, 8, 11] , lowercase_ : Union[str, Any]="project" , lowercase_ : Tuple=[4, 2, 1, 0.5] , lowercase_ : Dict=[96, 192, 384, 768] , lowercase_ : List[Any]=256 , lowercase_ : List[Any]=-1 , lowercase_ : Dict=False , lowercase_ : List[str]=True , lowercase_ : List[str]=0.4 , lowercase_ : List[Any]=255 , lowercase_ : Dict=0.1 , lowercase_ : Optional[Any]=[1, 1_024, 24, 24] , lowercase_ : List[Any]=[0, 1] , lowercase_ : Optional[int]=None , **lowercase_ : Any , ) -> List[str]:
super().__init__(**lowercase_ )
UpperCAmelCase : List[Any] = hidden_size
UpperCAmelCase : Optional[int] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
UpperCAmelCase : str = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
UpperCAmelCase : Tuple = BitConfig(**lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
logger.info('Initializing the config with a `BiT` backbone.' )
UpperCAmelCase : Optional[int] = BitConfig(**lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Optional[int] = backbone_config
else:
raise ValueError(
f"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
UpperCAmelCase : int = backbone_featmap_shape
UpperCAmelCase : str = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
UpperCAmelCase : int = None
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : int = []
UpperCAmelCase : Tuple = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : str = intermediate_size
UpperCAmelCase : Tuple = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : int = image_size
UpperCAmelCase : str = patch_size
UpperCAmelCase : Tuple = num_channels
UpperCAmelCase : Union[str, Any] = qkv_bias
UpperCAmelCase : Union[str, Any] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
UpperCAmelCase : int = readout_type
UpperCAmelCase : Union[str, Any] = reassemble_factors
UpperCAmelCase : Any = neck_hidden_sizes
UpperCAmelCase : Dict = fusion_hidden_size
UpperCAmelCase : List[Any] = head_in_index
UpperCAmelCase : Dict = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
UpperCAmelCase : List[Any] = use_auxiliary_head
UpperCAmelCase : str = auxiliary_loss_weight
UpperCAmelCase : Optional[Any] = semantic_loss_ignore_index
UpperCAmelCase : Optional[int] = semantic_classifier_dropout
def UpperCAmelCase_ ( self : str ) -> int:
UpperCAmelCase : int = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase : Tuple = self.backbone_config.to_dict()
UpperCAmelCase : List[str] = self.__class__.model_type
return output
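# Usage sketch (assumption: this mirrors transformers' DPTConfig):
# from transformers import DPTConfig, DPTForDepthEstimation
# config = DPTConfig(is_hybrid=False)
# model = DPTForDepthEstimation(config)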
| 695 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"gpt-neox-20b": 2048,
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowercase_ : Any=None , lowercase_ : Dict=None , lowercase_ : List[str]=None , lowercase_ : List[Any]="<|endoftext|>" , lowercase_ : List[str]="<|endoftext|>" , lowercase_ : Any="<|endoftext|>" , lowercase_ : List[str]=False , **lowercase_ : Union[str, Any] , ) -> str:
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
UpperCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase_ ) != add_prefix_space:
UpperCAmelCase : Tuple = getattr(lowercase_ , pre_tok_state.pop('type' ) )
UpperCAmelCase : Optional[Any] = add_prefix_space
UpperCAmelCase : Tuple = pre_tok_class(**lowercase_ )
UpperCAmelCase : Any = add_prefix_space
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : Optional[int] = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : "Conversation" ) -> List[int]:
UpperCAmelCase : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_ ) + [self.eos_token_id] )
if len(lowercase_ ) > self.model_max_length:
UpperCAmelCase : int = input_ids[-self.model_max_length :]
return input_ids
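# Usage sketch (standard fast-tokenizer flow; checkpoint name from the map above):
# from transformers import GPTNeoXTokenizerFast
# tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# ids = tok("hello world").input_ids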
| 695 | 1 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter( year ):
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_00 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
lowercase__ = "will be" if year > datetime.now().year else "was"
print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
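        # For reference, Gregorian Easter falls on 1994-04-03, 2000-04-23,
        # 2010-04-04, 2021-04-04 and 2023-04-09, which the loop should print.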
| 695 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = """openai/whisper-base"""
    description = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    name = """transcriber"""
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["""audio"""]
    outputs = ["""text"""]
    def encode( self , audio ):
        return self.pre_processor(audio , return_tensors='pt' ).input_features
    def forward( self , inputs ):
        return self.model.generate(inputs=inputs )
    def decode( self , outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
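# Usage sketch (hedged): PipelineTool subclasses are callable, chaining
# encode -> forward -> decode:
# tool = SpeechToTextTool()
# transcript = tool("path/to/audio.flac")  # hypothetical audio path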
| 695 | 1 |
'''simple docstring'''
class A_ : # Public class to implement a graph
'''simple docstring'''
    def __init__( self , row : int , col : int , graph : list[list[bool]] ) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe( self , i : int , j : int , visited : list[list[bool]] ) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs( self , i : int , j : int , visited : list[list[bool]] ) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )
    def count_islands( self ) -> int: # And finally, count all islands.
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
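# Example (8-directional connectivity, using the class above):
# graph = [[1, 1, 0, 0],
#          [0, 1, 0, 1],
#          [0, 0, 0, 0],
#          [1, 0, 1, 1]]
# A_(4, 4, graph).count_islands()  # -> 4 islands:
# {(0,0),(0,1),(1,1)}, {(1,3)}, {(3,0)} and {(3,2),(3,3)}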
| 695 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """whisper"""
UpperCAmelCase_ : Tuple = ["""past_key_values"""]
UpperCAmelCase_ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str , lowercase_ : Any=51_865 , lowercase_ : List[Any]=80 , lowercase_ : int=6 , lowercase_ : Dict=4 , lowercase_ : List[Any]=6 , lowercase_ : Any=4 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : List[Any]=50_257 , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : str="gelu" , lowercase_ : List[str]=256 , lowercase_ : str=0.0 , lowercase_ : Any=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]=1_500 , lowercase_ : List[Any]=448 , lowercase_ : int=50_256 , lowercase_ : Union[str, Any]=50_256 , lowercase_ : List[Any]=50_256 , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=[220, 50_256] , lowercase_ : Tuple=False , lowercase_ : str=256 , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=0.05 , lowercase_ : Any=10 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=10 , lowercase_ : int=0 , lowercase_ : Optional[int]=7 , **lowercase_ : Union[str, Any] , ) -> List[str]:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Any = num_mel_bins
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : str = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_layers
UpperCAmelCase : Any = decoder_attention_heads
UpperCAmelCase : Tuple = decoder_ffn_dim
UpperCAmelCase : List[str] = encoder_ffn_dim
UpperCAmelCase : int = dropout
UpperCAmelCase : int = attention_dropout
UpperCAmelCase : List[Any] = activation_dropout
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Union[str, Any] = init_std
UpperCAmelCase : Dict = encoder_layerdrop
UpperCAmelCase : str = decoder_layerdrop
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : Tuple = max_source_positions
UpperCAmelCase : List[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase : Optional[int] = classifier_proj_size
UpperCAmelCase : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : Optional[Any] = apply_spec_augment
UpperCAmelCase : Optional[Any] = mask_time_prob
UpperCAmelCase : Optional[Any] = mask_time_length
UpperCAmelCase : str = mask_time_min_masks
UpperCAmelCase : List[str] = mask_feature_prob
UpperCAmelCase : Tuple = mask_feature_length
UpperCAmelCase : Optional[int] = mask_feature_min_masks
UpperCAmelCase : str = median_filter_width
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , suppress_tokens=lowercase_ , begin_suppress_tokens=lowercase_ , **lowercase_ , )
class A_ ( _snake_case ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase : Optional[int] = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
UpperCAmelCase : int = {0: 'batch'}
else:
UpperCAmelCase : List[str] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='inputs' )
return common_inputs
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 22_050 , lowercase_ : float = 5.0 , lowercase_ : int = 220 , ) -> Mapping[str, Any]:
UpperCAmelCase : Tuple = OrderedDict()
UpperCAmelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase_ , framework=lowercase_ , sampling_rate=lowercase_ , time_duration=lowercase_ , frequency=lowercase_ , )
UpperCAmelCase : Optional[Any] = encoder_inputs['input_features'].shape[2]
UpperCAmelCase : Tuple = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase : Optional[int] = super().generate_dummy_inputs(
preprocessor.tokenizer , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Dict = encoder_inputs.pop('input_features' )
UpperCAmelCase : List[str] = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase : Union[str, Any] = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def UpperCAmelCase_ ( self : Dict ) -> float:
return 1E-3
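# Note: the final property corresponds to `atol_for_validation` in the ONNX
# export API; 1e-3 is the tolerance used when validating the exported model.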
| 695 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A_ ( _snake_case , _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = StableDiffusionInpaintPipeline
UpperCAmelCase_ : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCAmelCase_ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase_ : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase_ : Optional[Any] = frozenset([] )
def UpperCAmelCase_ ( self : List[Any] ) -> str:
torch.manual_seed(0 )
UpperCAmelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowercase_ , )
UpperCAmelCase : Optional[Any] = PNDMScheduler(skip_prk_steps=lowercase_ )
torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
UpperCAmelCase : Optional[Any] = CLIPTextModel(lowercase_ )
UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : List[str] , lowercase_ : Tuple=0 ) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCAmelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : Tuple = Image.fromarray(np.uinta(lowercase_ ) ).convert('RGB' ).resize((64, 64) )
UpperCAmelCase : Dict = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(lowercase_ ).startswith('mps' ):
UpperCAmelCase : int = torch.manual_seed(lowercase_ )
else:
UpperCAmelCase : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase_ ( self : List[str] ) -> str:
UpperCAmelCase : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase : List[str] = StableDiffusionInpaintPipeline(**lowercase_ )
UpperCAmelCase : List[str] = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Dict = self.get_dummy_inputs(lowercase_ )
UpperCAmelCase : Union[str, Any] = sd_pipe(**lowercase_ ).images
UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[Any] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCAmelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCAmelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
UpperCAmelCase : int = 'stabilityai/stable-diffusion-2-inpainting'
UpperCAmelCase : Any = StableDiffusionInpaintPipeline.from_pretrained(lowercase_ , safety_checker=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
UpperCAmelCase : Optional[Any] = 'Face of a yellow cat, high resolution, sitting on a park bench'
UpperCAmelCase : List[Any] = torch.manual_seed(0 )
UpperCAmelCase : Any = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , )
UpperCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCAmelCase_ ( self : Tuple ) -> int:
UpperCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCAmelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
UpperCAmelCase : List[str] = 'stabilityai/stable-diffusion-2-inpainting'
UpperCAmelCase : Tuple = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ , torch_dtype=torch.float16 , safety_checker=lowercase_ , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
UpperCAmelCase : Optional[Any] = 'Face of a yellow cat, high resolution, sitting on a park bench'
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , )
UpperCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCAmelCase : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCAmelCase : Dict = 'stabilityai/stable-diffusion-2-inpainting'
UpperCAmelCase : List[str] = PNDMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' )
UpperCAmelCase : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , scheduler=lowercase_ , torch_dtype=torch.float16 , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase : List[str] = 'Face of a yellow cat, high resolution, sitting on a park bench'
UpperCAmelCase : Dict = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , )
UpperCAmelCase : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
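# Hedged note (added, not from the original test file): the measurement pattern
# above is reset -> run -> read the peak counter:
#   torch.cuda.reset_peak_memory_stats(); pipe(**inputs); torch.cuda.max_memory_allocated()
# enable_sequential_cpu_offload() keeps only one submodule on the GPU at a time,
# which is what brings the peak under the 2.65 GB bound asserted here.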
| 695 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
UpperCAmelCase : Any = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
UpperCAmelCase : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
UpperCAmelCase : Dict = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase : Dict = torch.cuda.device_count()
UpperCAmelCase : List[Any] = num_gpus
UpperCAmelCase : List[Any] = False
if num_gpus > 1:
UpperCAmelCase : Tuple = 'MULTI_GPU'
else:
UpperCAmelCase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
UpperCAmelCase : Optional[int] = torch.xpu.device_count()
UpperCAmelCase : Optional[int] = num_xpus
UpperCAmelCase : Any = False
if num_xpus > 1:
UpperCAmelCase : Tuple = 'MULTI_XPU'
else:
UpperCAmelCase : str = 'NO'
elif is_npu_available():
UpperCAmelCase : Optional[int] = torch.npu.device_count()
UpperCAmelCase : str = num_npus
UpperCAmelCase : int = False
if num_npus > 1:
UpperCAmelCase : int = 'MULTI_NPU'
else:
UpperCAmelCase : List[str] = 'NO'
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = True
UpperCAmelCase : str = 1
UpperCAmelCase : str = 'NO'
UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
def default_command_parser(parser, parents):
    parser = parser.add_parser('default' , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '--config_file' , default=default_json_config_file , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , dest='save_location' , )
    parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=str , help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(f"accelerate configuration saved at {config_file}" )
| 695 | 1 |
'''simple docstring'''
def miller_rabin(n, allow_probable=False):
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            'Warning: upper bound of deterministic test is exceeded. '
            'Pass allow_probable=True to allow probabilistic test. '
            'A return value of True indicates a probable prime.' )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin():
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
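# Added sketch (hedged): the d * 2**s decomposition miller_rabin relies on, pulled
# out for illustration; `decompose` is a helper name introduced here, not in the
# original file.
def decompose(n):
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    return d, s

# 221 - 1 = 220 = 55 * 2**2, so each witness a is checked via pow(a, 55 * 2**r, 221).
assert decompose(221) == (55, 2)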
| 695 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( LayoutLMv2ImageProcessor ):
'''simple docstring'''
def __init__( self : List[Any] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> None:
warnings.warn(
    'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
    ' Please use LayoutLMv2ImageProcessor instead.' , FutureWarning , )
super().__init__(*lowercase_ , **lowercase_ )
| 695 | 1 |
'''simple docstring'''
from collections import namedtuple
lowercase__ = namedtuple("from_to", "from_ to")
lowercase__ = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1000),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.00454, 264.172),
"cubicyard": from_to(0.76455, 1.30795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.000236588, 4226.75),
}
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if from_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ', '.join(UpperCAmelCase_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ', '.join(UpperCAmelCase_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
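# Added usage example (hedged): 4 US gallons -> cubic meters multiplies the gallon
# from_-factor by the cubicmeter to-factor above: 4 * 0.00454 * 1 = 0.01816.
#   volume_conversion(4, "gallon", "cubicmeter")  # 0.01816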
| 695 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
guid: str
text_a: str
text_b: Optional[str] = None
label: Optional[str] = None
pairID: Optional[str] = None
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
input_ids: List[int]
attention_mask: Optional[List[int]] = None
token_type_ids: Optional[List[int]] = None
label: Optional[Union[int, float]] = None
pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( _snake_case ):
'''simple docstring'''
features: List[InputFeatures]
def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]:
UpperCAmelCase : Dict = hans_processors[task]()
UpperCAmelCase : List[Any] = os.path.join(
lowercase_ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase : int = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase : Tuple = torch.load(lowercase_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase : int = (
processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
)
logger.info('Training examples: %s' , len(lowercase_ ) )
UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
logger.info('Saving features into cached file %s' , lowercase_ )
torch.save(self.features , lowercase_ )
def __len__( self : Union[str, Any] ) -> str:
return len(self.features )
def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
'''simple docstring'''
features: List[InputFeatures]
def __init__( self : Tuple , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : int=False , lowercase_ : bool = False , ) -> Union[str, Any]:
UpperCAmelCase : int = hans_processors[task]()
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Any = label_list
UpperCAmelCase : str = processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
UpperCAmelCase : int = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
lowercase_ , (
{
'example_id': tf.int32,
'input_ids': tf.int32,
'attention_mask': tf.int32,
'token_type_ids': tf.int32,
},
tf.int64,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return self.dataset
def __len__( self : Tuple ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
return self.label_list
class A_ ( _snake_case ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int , lowercase_ : Optional[int] ) -> Any:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_train_set.txt' ) ) , 'train' )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> List[str]:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
return ["contradiction", "entailment", "neutral"]
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str ) -> Dict:
UpperCAmelCase : Union[str, Any] = []
for i, line in enumerate(lowercase_ ):
if i == 0:
continue
UpperCAmelCase : Tuple = '%s-%s' % (set_type, line[0])
UpperCAmelCase : Tuple = line[5]
UpperCAmelCase : Dict = line[6]
UpperCAmelCase : Optional[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7]
UpperCAmelCase : Optional[Any] = line[0]
examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
return examples
def hans_convert_examples_to_features( examples , label_list , max_length , tokenizer , ):
UpperCAmelCase : List[str] = {label: i for i, label in enumerate(UpperCAmelCase_ )}
UpperCAmelCase : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase_ ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d' % (ex_index) )
UpperCAmelCase : int = tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , truncation=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , )
UpperCAmelCase : List[str] = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase : Any = int(example.pairID )
features.append(InputFeatures(**UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
hans_tasks_num_labels = {
"hans": 3,
}
hans_processors = {
"hans": HansProcessor,
}
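# Hedged note (added): the label-swap in the dataset classes above exists because
# RoBERTa/BART MNLI checkpoints were trained with a different label order; after
# the swap the list is ["contradiction", "neutral", "entailment"], so
# hans_convert_examples_to_features builds
#   {"contradiction": 0, "neutral": 1, "entailment": 2}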
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase_ : Any = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ : int = False
UpperCAmelCase_ : int = False
def UpperCAmelCase_ ( self : Any , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]=False ) -> Tuple:
UpperCAmelCase : Tuple = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class in get_values(lowercase_ ):
UpperCAmelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Any=13 , lowercase_ : str=7 , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[int]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Tuple=True , lowercase_ : Optional[int]=99 , lowercase_ : Tuple=32 , lowercase_ : int=32 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=4 , lowercase_ : Tuple=37 , lowercase_ : Any="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : str=16 , lowercase_ : List[Any]=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Dict=3 , lowercase_ : Tuple=4 , lowercase_ : Optional[Any]=None , ) -> List[Any]:
UpperCAmelCase : List[Any] = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : str = seq_length
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : Union[str, Any] = use_input_mask
UpperCAmelCase : Optional[Any] = use_token_type_ids
UpperCAmelCase : Optional[int] = use_labels
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Optional[Any] = hidden_size
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : Dict = intermediate_size
UpperCAmelCase : int = hidden_act
UpperCAmelCase : Optional[Any] = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Optional[Any] = max_position_embeddings
UpperCAmelCase : Union[str, Any] = type_vocab_size
UpperCAmelCase : int = type_sequence_label_size
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : Optional[int] = num_labels
UpperCAmelCase : Dict = num_choices
UpperCAmelCase : int = scope
UpperCAmelCase : Union[str, Any] = embedding_size
def UpperCAmelCase_ ( self : Any ) -> Dict:
UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : List[str] = None
if self.use_input_mask:
UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : List[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Dict = None
UpperCAmelCase : str = None
UpperCAmelCase : Optional[int] = None
if self.use_labels:
UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : int = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : str ) -> Optional[int]:
UpperCAmelCase : int = TFMobileBertModel(config=lowercase_ )
UpperCAmelCase : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCAmelCase : Any = model(lowercase_ )
UpperCAmelCase : Tuple = [input_ids, input_mask]
UpperCAmelCase : Union[str, Any] = model(lowercase_ )
UpperCAmelCase : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : str , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Any ) -> List[str]:
UpperCAmelCase : Dict = TFMobileBertForMaskedLM(config=lowercase_ )
UpperCAmelCase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCAmelCase : Optional[int] = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : Any , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = TFMobileBertForNextSentencePrediction(config=lowercase_ )
UpperCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCAmelCase : Tuple = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : str , lowercase_ : Any , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : List[str] ) -> Union[str, Any]:
UpperCAmelCase : int = TFMobileBertForPreTraining(config=lowercase_ )
UpperCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCAmelCase : Tuple = model(lowercase_ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCAmelCase_ ( self : Dict , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[int] ) -> Dict:
UpperCAmelCase : Any = self.num_labels
UpperCAmelCase : str = TFMobileBertForSequenceClassification(config=lowercase_ )
UpperCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCAmelCase : int = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : Dict , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : List[Any] ) -> List[Any]:
UpperCAmelCase : Dict = self.num_choices
UpperCAmelCase : Optional[int] = TFMobileBertForMultipleChoice(config=lowercase_ )
UpperCAmelCase : Optional[int] = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase : Any = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase : Any = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase : Any = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCAmelCase : Tuple = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self : int , lowercase_ : int , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str ) -> Dict:
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : int = TFMobileBertForTokenClassification(config=lowercase_ )
UpperCAmelCase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCAmelCase : Optional[Any] = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Any ) -> str:
UpperCAmelCase : Union[str, Any] = TFMobileBertForQuestionAnswering(config=lowercase_ )
UpperCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCAmelCase : Union[str, Any] = model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
UpperCAmelCase : int = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : str = config_and_inputs
UpperCAmelCase : str = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self )
UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowercase_ )
def UpperCAmelCase_ ( self : Tuple ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> Any:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowercase_ )
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
UpperCAmelCase : int = TFMobileBertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_tf
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
UpperCAmelCase : Union[str, Any] = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
UpperCAmelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase : List[Any] = model(lowercase_ )[0]
UpperCAmelCase : Union[str, Any] = [1, 6, 30_522]
self.assertEqual(output.shape , lowercase_ )
UpperCAmelCase : Any = tf.constant(
[
[
[-4.591_9547, -9.24_8295, -9.64_5256],
[-6.730_6175, -6.44_0284, -6.605_2837],
[-7.274_3506, -6.784_7915, -6.02_4673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1E-4 )
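# Hedged note (added): the 1e-4 tolerance on the [:, :3, :3] slice is the usual
# integration-check pattern -- compare a small deterministic corner of the logits
# against values recorded once from the reference checkpoint.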
| 695 |
'''simple docstring'''
def solution(n=1_000):
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(f'''{solution() = }''')
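# Added sketch (hedged): the recurrence above generates the convergents of
# sqrt(2)'s continued fraction -- 3/2, 7/5, 17/12, 41/29, ...; the 8th convergent
# 1393/985 is the first whose numerator has more digits than its denominator.
#   num, den = 1, 1
#   for _ in range(8):
#       num, den = num + 2 * den, num + den
#   # num, den == 1393, 985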
| 695 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , lowercase_ : Any , lowercase_ : int=13 , lowercase_ : Optional[int]=3 , lowercase_ : Union[str, Any]=224 , lowercase_ : int=30 , lowercase_ : Dict=400 , lowercase_ : Tuple=True , lowercase_ : str=None , lowercase_ : str=True , lowercase_ : int=[0.5, 0.5, 0.5] , lowercase_ : Tuple=[0.5, 0.5, 0.5] , ) -> Any:
UpperCAmelCase : Tuple = size if size is not None else {'height': 18, 'width': 18}
UpperCAmelCase : List[str] = parent
UpperCAmelCase : Optional[Any] = batch_size
UpperCAmelCase : Tuple = num_channels
UpperCAmelCase : Any = image_size
UpperCAmelCase : Any = min_resolution
UpperCAmelCase : Optional[int] = max_resolution
UpperCAmelCase : str = do_resize
UpperCAmelCase : Tuple = size
UpperCAmelCase : Optional[Any] = do_normalize
UpperCAmelCase : Optional[Any] = image_mean
UpperCAmelCase : int = image_std
def UpperCAmelCase_ ( self : Tuple ) -> str:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = ViTImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self : Any ) -> Any:
UpperCAmelCase : Any = EfficientFormerImageProcessorTester(self )
@property
def UpperCAmelCase_ ( self : Dict ) -> Dict:
return self.image_proc_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , 'image_mean' ) )
self.assertTrue(hasattr(lowercase_ , 'image_std' ) )
self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase_ , 'do_resize' ) )
self.assertTrue(hasattr(lowercase_ , 'size' ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
pass
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
# Initialize image_processor
UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCAmelCase : Any = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
UpperCAmelCase : List[Any] = image_processor(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
# Initialize image_processor
UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
UpperCAmelCase : Any = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
UpperCAmelCase : int = image_processor(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def UpperCAmelCase_ ( self : int ) -> Any:
# Initialize image_processor
UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
UpperCAmelCase : Optional[int] = image_processor(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
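# Hedged note (added): the three test methods above deliberately run the same
# resize/normalize pipeline on PIL images, numpy arrays, and torch tensors, so the
# (batch, channels, height, width) output contract is checked for every input type.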
| 695 |
'''simple docstring'''
def solution(power=1_000):
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
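# Added check (hedged): for a smaller exponent, 2**15 = 32768 and 3+2+7+6+8 == 26,
# so solution(15) == 26.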
| 695 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
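# Hedged note (added): with sys.modules[__name__] replaced by _LazyModule above,
# framework-specific submodules are imported only on first attribute access, e.g.
#   from transformers import RobertaPreLayerNormModel  # resolved lazily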
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
lowercase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = """blip_2_vision_model"""
def __init__( self : List[str] , lowercase_ : int=1_408 , lowercase_ : Tuple=6_144 , lowercase_ : Dict=39 , lowercase_ : Optional[int]=16 , lowercase_ : str=224 , lowercase_ : Any=14 , lowercase_ : int="gelu" , lowercase_ : int=0.0_0001 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=1E-10 , lowercase_ : List[str]=True , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
super().__init__(**lowercase_ )
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : str = patch_size
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = qkv_bias
@classmethod
def UpperCAmelCase_ ( cls : List[str] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip_2_qformer"""
def __init__( self : Tuple , lowercase_ : Union[str, Any]=30_522 , lowercase_ : int=768 , lowercase_ : Dict=12 , lowercase_ : Dict=12 , lowercase_ : int=3_072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=1E-12 , lowercase_ : str=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : Optional[int]=2 , lowercase_ : str=1_408 , **lowercase_ : Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = layer_norm_eps
UpperCAmelCase : Dict = position_embedding_type
UpperCAmelCase : Any = cross_attention_frequency
UpperCAmelCase : Any = encoder_hidden_size
@classmethod
def UpperCAmelCase_ ( cls : List[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip-2"""
UpperCAmelCase_ : Any = True
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : Dict=32 , **lowercase_ : Union[str, Any] ) -> Any:
super().__init__(**lowercase_ )
if vision_config is None:
UpperCAmelCase : Union[str, Any] = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
UpperCAmelCase : int = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
UpperCAmelCase : Dict = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
UpperCAmelCase : str = Blip2VisionConfig(**lowercase_ )
UpperCAmelCase : str = Blip2QFormerConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
UpperCAmelCase : int = CONFIG_MAPPING[text_model_type](**lowercase_ )
UpperCAmelCase : Optional[int] = self.text_config.tie_word_embeddings
UpperCAmelCase : Dict = self.text_config.is_encoder_decoder
UpperCAmelCase : Tuple = num_query_tokens
UpperCAmelCase : Tuple = self.vision_config.hidden_size
UpperCAmelCase : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase : Union[str, Any] = 1.0
UpperCAmelCase : Union[str, Any] = 0.02
@classmethod
def UpperCAmelCase_ ( cls : Dict , lowercase_ : Blip2VisionConfig , lowercase_ : Blip2QFormerConfig , lowercase_ : PretrainedConfig , **lowercase_ : List[Any] , ) -> Tuple:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
UpperCAmelCase : Optional[int] = self.qformer_config.to_dict()
UpperCAmelCase : List[str] = self.text_config.to_dict()
UpperCAmelCase : str = self.__class__.model_type
return output
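# Hedged usage sketch (added): the equivalent composition with the released
# transformers classes that the obfuscated `A_` definitions above correspond to;
# treat the exact import names as assumptions if your transformers version differs.
#   from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig
#   config = Blip2Config.from_vision_qformer_text_configs(
#       vision_config=Blip2VisionConfig(),
#       qformer_config=Blip2QFormerConfig(),
#       text_config=OPTConfig(),
#   )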
| 695 | 1 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
        begin_error_msg = ' '.join(str(e).split(' ')[:-1])
        full_error_msg = ''
        depreciated_args = eval(str(e).split(' ')[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
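# Hedged usage note (added): the script is driven entirely by
# TensorFlowBenchmarkArguments flags; an illustrative invocation (flag names
# assumed from the benchmark args API):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128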
| 695 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
bleu_data = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict , lowercase_ : Dict ) -> Tuple:
return FSMTTokenizer.from_pretrained(lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> Tuple:
UpperCAmelCase : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(lowercase_ ).to(lowercase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def UpperCAmelCase_ ( self : List[str] , lowercase_ : int , lowercase_ : Any ) -> Optional[int]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
UpperCAmelCase : List[str] = f"""facebook/wmt19-{pair}"""
UpperCAmelCase : Optional[int] = self.get_tokenizer(lowercase_ )
UpperCAmelCase : int = self.get_model(lowercase_ )
UpperCAmelCase : List[Any] = bleu_data[pair]['src']
UpperCAmelCase : Optional[int] = bleu_data[pair]['tgt']
UpperCAmelCase : Any = tokenizer(lowercase_ , return_tensors='pt' , truncation=lowercase_ , padding='longest' ).to(lowercase_ )
UpperCAmelCase : List[Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase : List[Any] = tokenizer.batch_decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
UpperCAmelCase : Any = calculate_bleu(lowercase_ , lowercase_ )
print(lowercase_ )
self.assertGreaterEqual(scores['bleu'] , lowercase_ )
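# Hedged note (added): the per-pair values (26.0, 22.0, ...) are regression floors
# for this small validation batch, not full-test-set BLEU scores; assertGreaterEqual
# only guards against output-quality regressions.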
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
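# Added check (hedged): for [5, 1, 1, 5] the best non-adjacent picks are the two
# fives, so maximum_non_adjacent_sum([5, 1, 1, 5]) == 10.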
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = d_kv
UpperCAmelCase : Any = d_ff
UpperCAmelCase : List[str] = num_layers
UpperCAmelCase : str = num_heads
UpperCAmelCase : List[Any] = relative_attention_num_buckets
UpperCAmelCase : Tuple = relative_attention_max_distance
UpperCAmelCase : str = dropout_rate
UpperCAmelCase : Optional[int] = layer_norm_epsilon
UpperCAmelCase : int = initializer_factor
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : Union[str, Any] = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase : List[str] = dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = """pix2struct_vision_model"""
def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Any = patch_embed_hidden_size
UpperCAmelCase : Optional[int] = d_ff
UpperCAmelCase : Dict = dropout_rate
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : str = initializer_factor
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Union[str, Any] = dense_act_fn
UpperCAmelCase : Dict = seq_len
UpperCAmelCase : Optional[int] = relative_attention_num_buckets
UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
UpperCAmelCase : str = d_kv
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """pix2struct"""
UpperCAmelCase_ : Dict = True
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
if text_config is None:
UpperCAmelCase : Optional[int] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
UpperCAmelCase : str = self.text_config.pad_token_id
UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
UpperCAmelCase : Union[str, Any] = initializer_factor
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : str = is_vqa
@classmethod
def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.text_config.to_dict()
UpperCAmelCase : Dict = self.vision_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
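# Note on the serialisation above: to_dict() deep-copies the instance dict and
# inlines both sub-configs, so a Pix2Struct config round-trips through
# from_dict()/to_dict() with `text_config` and `vision_config` stored as
# nested dictionaries alongside `model_type`.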
| 695 | 1 |
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    # Sum the fifth powers of the decimal digits of `number`.
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
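# Worked check for the search above: the only numbers expressible as the sum
# of fifth powers of their digits are 4150, 4151, 54748, 92727, 93084 and
# 194979 (1 is excluded because it is not a sum), so solution() == 443839.
assert 4150 == sum(int(d) ** 5 for d in "4150")  # 1024 + 1 + 3125 + 0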
| 695 |
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode('utf-8'))


def base85_decode(a85encoded: bytes) -> str:
    return base64.b85decode(a85encoded).decode('utf-8')


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
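# Round-trip sketch for the helpers above: decoding an encoded string always
# returns the original text.
assert base85_decode(base85_encode("any utf-8 text")) == "any utf-8 text"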
| 695 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = ["""pixel_values"""]
def __init__( self : List[Any] , lowercase_ : bool = True , lowercase_ : int = 32 , lowercase_ : Union[str, Any]=PILImageResampling.BILINEAR , lowercase_ : bool = True , **lowercase_ : Optional[Any] , ) -> None:
UpperCAmelCase : List[Any] = do_resize
UpperCAmelCase : int = do_rescale
UpperCAmelCase : Tuple = size_divisor
UpperCAmelCase : Any = resample
super().__init__(**lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : np.ndarray , lowercase_ : int , lowercase_ : str , lowercase_ : Optional[ChannelDimension] = None , **lowercase_ : Tuple ) -> np.ndarray:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = get_image_size(lowercase_ )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCAmelCase : List[str] = height // size_divisor * size_divisor
UpperCAmelCase : Union[str, Any] = width // size_divisor * size_divisor
UpperCAmelCase : List[str] = resize(lowercase_ , (new_h, new_w) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
return image
def UpperCAmelCase_ ( self : Dict , lowercase_ : np.ndarray , lowercase_ : float , lowercase_ : Optional[ChannelDimension] = None , **lowercase_ : Optional[Any] ) -> np.ndarray:
return rescale(image=lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , lowercase_ : Optional[bool] = None , lowercase_ : Optional[int] = None , lowercase_ : int=None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[Union[TensorType, str]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : List[Any] , ) -> BatchFeature:
UpperCAmelCase : Dict = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase : Dict = size_divisor if size_divisor is not None else self.size_divisor
UpperCAmelCase : Tuple = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
UpperCAmelCase : List[Any] = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
UpperCAmelCase : Any = [to_numpy_array(lowercase_ ) for img in images]
if do_resize:
UpperCAmelCase : Union[str, Any] = [self.resize(lowercase_ , size_divisor=lowercase_ , resample=lowercase_ ) for image in images]
if do_rescale:
UpperCAmelCase : str = [self.rescale(lowercase_ , scale=1 / 255 ) for image in images]
UpperCAmelCase : Dict = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
UpperCAmelCase : Tuple = {'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
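# Worked example of the size_divisor rounding performed by `resize` above
# (pure arithmetic, no image needed): a 225x333 input with size_divisor=32 is
# snapped down to 224x320, so both sides become exact multiples of 32.
height, width, size_divisor = 225, 333, 32
assert (height // size_divisor * size_divisor, width // size_divisor * size_divisor) == (224, 320)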
| 695 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
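# Minimal usage sketch: each query vector is paired with its nearest dataset
# vector and the euclidean distance to it.
_dataset = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
_queries = np.array([[0.0, 0.0, 0.1]])
print(similarity_search(_dataset, _queries))  # [[[0.0, 0.0, 0.0], 0.1]]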
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
class Node:
    '''simple docstring'''

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('Tree is: ')
    display(tree)
if __name__ == "__main__":
main()
| 695 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 1 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
lowercase__ = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
lowercase__ = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
lowercase__ = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Tuple=None , lowercase_ : str=None , lowercase_ : Optional[int]=False ) -> str:
if concatenate_texts:
return compute_measures(lowercase_ , lowercase_ )["wer"]
else:
UpperCAmelCase : int = 0
UpperCAmelCase : List[str] = 0
for prediction, reference in zip(lowercase_ , lowercase_ ):
UpperCAmelCase : List[str] = compute_measures(lowercase_ , lowercase_ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
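# Worked example of the iterative branch above, using the docstring sample:
# "this is the prediction" vs "this is the reference" contributes 1 error over
# 4 reference words, and "there is an other sample" vs "there is another one"
# contributes 3 errors over 4, so WER = (1 + 3) / (4 + 4) = 0.5.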
| 695 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url).json()[0]['urls'][0]['src']
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 695 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ = "▁"
lowercase__ = {"vocab_file": "spiece.model"}
lowercase__ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
lowercase__ = {
"google/pegasus-xsum": 512,
}
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase_ : int = VOCAB_FILES_NAMES
UpperCAmelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : Any , lowercase_ : int , lowercase_ : Optional[Any]="<pad>" , lowercase_ : Dict="</s>" , lowercase_ : int="<unk>" , lowercase_ : Optional[int]="<mask_2>" , lowercase_ : str="<mask_1>" , lowercase_ : List[str]=None , lowercase_ : List[Any]=103 , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Union[str, Any] , ) -> None:
UpperCAmelCase : Any = offset
if additional_special_tokens is not None:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError(
f"""additional_special_tokens should be of type {type(lowercase_ )}, but is"""
f""" {type(lowercase_ )}""" )
UpperCAmelCase : Optional[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(lowercase_ ) , self.offset - 1 )
]
if len(set(lowercase_ ) ) != len(lowercase_ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
UpperCAmelCase : Optional[Any] = additional_special_tokens_extended
else:
UpperCAmelCase : Dict = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
UpperCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , pad_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
UpperCAmelCase : Dict = mask_token_sent
UpperCAmelCase : Any = vocab_file
UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
# add special tokens to encoder dict
UpperCAmelCase : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
UpperCAmelCase : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def UpperCAmelCase_ ( self : Any ) -> int:
return len(self.sp_model ) + self.offset
def UpperCAmelCase_ ( self : str ) -> Dict[str, int]:
UpperCAmelCase : List[str] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase : Tuple = self.__dict__.copy()
UpperCAmelCase : Optional[int] = None
return state
def __setstate__( self : List[str] , lowercase_ : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase : Tuple = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase : Optional[Any] = {}
UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : str ) -> List[str]:
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def UpperCAmelCase_ ( self : Any , lowercase_ : str ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
UpperCAmelCase : Union[str, Any] = self.sp_model.piece_to_id(lowercase_ )
return sp_id + self.offset
def UpperCAmelCase_ ( self : List[str] , lowercase_ : int ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
UpperCAmelCase : int = self.sp_model.IdToPiece(index - self.offset )
return token
def UpperCAmelCase_ ( self : int , lowercase_ : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = []
UpperCAmelCase : str = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase_ ) + token
UpperCAmelCase : Optional[int] = []
else:
current_sub_tokens.append(lowercase_ )
out_string += self.sp_model.decode(lowercase_ )
return out_string.strip()
def UpperCAmelCase_ ( self : int , lowercase_ : List[str]=False ) -> int:
return 1
def UpperCAmelCase_ ( self : Tuple , lowercase_ : List[Any] ) -> Any:
UpperCAmelCase : List[Any] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def UpperCAmelCase_ ( self : Dict , lowercase_ : List , lowercase_ : Optional[List] = None , lowercase_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowercase_ )
elif token_ids_a is None:
return self._special_token_mask(lowercase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def UpperCAmelCase_ ( self : Dict , lowercase_ : Tuple , lowercase_ : Any=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase_ ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase : Union[str, Any] = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , 'wb' ) as fi:
UpperCAmelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
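# Illustrative note on the id scheme above: ids 0..offset-1 are reserved for
# pad/eos/mask and the <unk_x> filler tokens, and every SentencePiece piece id
# is shifted up by `offset` (103 by default), so piece id 0 maps to token id
# 103 and vocab_size == len(sp_model) + offset.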
| 695 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
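    # With the default limit of 10**9 this sums the perimeters of all
    # almost-equilateral Heronian triangles (Project Euler 94):
    # solution() == 518408346.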
| 695 | 1 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception('Temperature cannot be less than 0 K')
    if molar_mass <= 0:
        raise Exception('Molar mass cannot be less than or equal to 0 kg/mol')
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
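    # Units note for the demo above: UNIVERSAL_GAS_CONSTANT is in J/(mol*K),
    # so molar_mass should be in kg/mol. Nitrogen is 0.028 kg/mol; with that
    # value vrms = (3 * 8.3144598 * 300 / 0.028) ** 0.5 ≈ 517 m/s.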
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : str = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
UpperCAmelCase : Any = self.dummy_uncond_unet
UpperCAmelCase : Tuple = DDIMScheduler()
UpperCAmelCase : Optional[Any] = self.dummy_vq_model
UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 695 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """dpr"""
def __init__( self : Union[str, Any] , lowercase_ : Optional[int]=30_522 , lowercase_ : Any=768 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Dict=3_072 , lowercase_ : Optional[int]="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Dict=512 , lowercase_ : Tuple=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Dict=1E-12 , lowercase_ : Any=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : int = 0 , **lowercase_ : Optional[Any] , ) -> List[Any]:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase : Tuple = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : Union[str, Any] = hidden_act
UpperCAmelCase : int = intermediate_size
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : List[str] = max_position_embeddings
UpperCAmelCase : Optional[int] = type_vocab_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Any = layer_norm_eps
UpperCAmelCase : Dict = projection_dim
UpperCAmelCase : Dict = position_embedding_type
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
UpperCAmelCase : Dict = self.dummy_uncond_unet
UpperCAmelCase : Dict = KarrasVeScheduler()
UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 695 | 1 |
'''simple docstring'''
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=' ')
        print(' ')
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
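# The driver above prints all C(5, 3) = 10 combinations in lexicographic
# order of indices: "10 20 30", "10 20 40", "10 20 50", "10 30 40", ...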
| 695 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = """autoformer"""
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Dict , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : str = "student_t" , lowercase_ : str = "nll" , lowercase_ : int = 1 , lowercase_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowercase_ : bool = True , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : Optional[List[int]] = None , lowercase_ : Optional[List[int]] = None , lowercase_ : int = 64 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 32 , lowercase_ : int = 32 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 100 , lowercase_ : float = 0.02 , lowercase_ : bool = True , lowercase_ : Union[str, Any]=True , lowercase_ : int = 10 , lowercase_ : int = 25 , lowercase_ : int = 3 , **lowercase_ : str , ) -> Dict:
# time series specific configuration
UpperCAmelCase : int = prediction_length
UpperCAmelCase : Optional[Any] = context_length if context_length is not None else prediction_length
UpperCAmelCase : List[Any] = distribution_output
UpperCAmelCase : Tuple = loss
UpperCAmelCase : Dict = input_size
UpperCAmelCase : Dict = num_time_features
UpperCAmelCase : Tuple = lags_sequence
UpperCAmelCase : str = scaling
UpperCAmelCase : Optional[int] = num_dynamic_real_features
UpperCAmelCase : List[str] = num_static_real_features
UpperCAmelCase : Optional[int] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : int = cardinality
else:
UpperCAmelCase : Union[str, Any] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : Any = embedding_dimension
else:
UpperCAmelCase : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase : Dict = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase : Optional[int] = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : Dict = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_attention_heads
UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase : str = decoder_ffn_dim
UpperCAmelCase : str = encoder_layers
UpperCAmelCase : Optional[Any] = decoder_layers
UpperCAmelCase : int = dropout
UpperCAmelCase : Any = attention_dropout
UpperCAmelCase : Tuple = activation_dropout
UpperCAmelCase : str = encoder_layerdrop
UpperCAmelCase : Union[str, Any] = decoder_layerdrop
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Dict = init_std
UpperCAmelCase : Union[str, Any] = use_cache
# Autoformer
UpperCAmelCase : Any = label_length
UpperCAmelCase : List[Any] = moving_average
UpperCAmelCase : Optional[Any] = autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def UpperCAmelCase_ ( self : List[str] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
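# Worked example of the feature count above with the defaults: there are no
# categorical, dynamic, time or static real features, so embedding_dimension
# sums to 0 and only the two scaling features (log1p(abs(loc)) and log(scale))
# remain: _number_of_features == 2 and
# feature_size == input_size * len(lags_sequence) + 2 == 1 * 7 + 2 == 9.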
| 695 | 1 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding='utf-8') as f:
        results = json.load(f)
    output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split('/')[-1]
        output_md.append(f"""### Benchmark: {benchmark_file_name}""")
        title = '| metric |'
        lines = '|--------|'
        value = '| new / old (diff) |'
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['new']
            old_val = metric_vals.get('old', None)
            dif_val = metric_vals.get('diff', None)
            val_str = f""" {new_val:f}""" if isinstance(new_val, (int, float)) else 'None'
            if old_val is not None:
                val_str += f""" / {old_val:f}""" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f""" ({dif_val:f})""" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append('</details>')
    with open(output_md_file, 'w', encoding='utf-8') as f:
        f.writelines('\n'.join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 695 |
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
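# Example: character trigrams of a five-character string.
assert create_ngram("abcde", 3) == ["abc", "bcd", "cde"]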
if __name__ == "__main__":
from doctest import testmod
testmod()
| 695 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = """blip_2_vision_model"""
def __init__( self : List[str] , lowercase_ : int=1_408 , lowercase_ : Tuple=6_144 , lowercase_ : Dict=39 , lowercase_ : Optional[int]=16 , lowercase_ : str=224 , lowercase_ : Any=14 , lowercase_ : int="gelu" , lowercase_ : int=0.0_0001 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=1E-10 , lowercase_ : List[str]=True , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
super().__init__(**lowercase_ )
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : str = patch_size
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = qkv_bias
@classmethod
def UpperCAmelCase_ ( cls : List[str] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip_2_qformer"""
def __init__( self : Tuple , lowercase_ : Union[str, Any]=30_522 , lowercase_ : int=768 , lowercase_ : Dict=12 , lowercase_ : Dict=12 , lowercase_ : int=3_072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=1E-12 , lowercase_ : str=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : Optional[int]=2 , lowercase_ : str=1_408 , **lowercase_ : Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = layer_norm_eps
UpperCAmelCase : Dict = position_embedding_type
UpperCAmelCase : Any = cross_attention_frequency
UpperCAmelCase : Any = encoder_hidden_size
@classmethod
def UpperCAmelCase_ ( cls : List[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip-2"""
UpperCAmelCase_ : Any = True
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : Dict=32 , **lowercase_ : Union[str, Any] ) -> Any:
super().__init__(**lowercase_ )
if vision_config is None:
UpperCAmelCase : Union[str, Any] = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
UpperCAmelCase : int = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
UpperCAmelCase : Dict = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
UpperCAmelCase : str = BlipaVisionConfig(**lowercase_ )
UpperCAmelCase : str = BlipaQFormerConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
UpperCAmelCase : int = CONFIG_MAPPING[text_model_type](**lowercase_ )
UpperCAmelCase : Optional[int] = self.text_config.tie_word_embeddings
UpperCAmelCase : Dict = self.text_config.is_encoder_decoder
UpperCAmelCase : Tuple = num_query_tokens
UpperCAmelCase : Tuple = self.vision_config.hidden_size
UpperCAmelCase : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase : Union[str, Any] = 1.0
UpperCAmelCase : Union[str, Any] = 0.02
@classmethod
def UpperCAmelCase_ ( cls : Dict , lowercase_ : BlipaVisionConfig , lowercase_ : BlipaQFormerConfig , lowercase_ : PretrainedConfig , **lowercase_ : List[Any] , ) -> Tuple:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
UpperCAmelCase : Optional[int] = self.qformer_config.to_dict()
UpperCAmelCase : List[str] = self.text_config.to_dict()
UpperCAmelCase : str = self.__class__.model_type
return output
| 695 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
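# Minimal usage sketch (assumption: the IIRFilter imported above exposes the
# set_coefficients() used here plus a per-sample process() method, as in the
# accompanying audio_filters package):
# filt = make_peak(1_000, 48_000, gain_db=6.0)
# processed = [filt.process(sample) for sample in samples]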
| 695 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/')
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/')
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
lowercase__ = parser.parse_args()
lowercase__ = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
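
# Example invocation (added illustration; the script filename and the model
# identifiers below are placeholders, not taken from this file):
#
#     python consolidate_rag_checkpoint.py \
#         --model_type rag_sequence \
#         --generator_name_or_path facebook/bart-large-cnn \
#         --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#         --dest ./rag-sequence-checkpoint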
| 695 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
class A_ ( Generic[T] ):
'''simple docstring'''
UpperCAmelCase_ : deque[T] # Cache store of keys
UpperCAmelCase_ : set[T] # References of the keys in cache
UpperCAmelCase_ : int = 10 # Maximum capacity of cache
def __init__( self : List[Any] , lowercase_ : int ) -> None:
UpperCAmelCase : Any = deque()
UpperCAmelCase : Dict = set()
if not n:
UpperCAmelCase : Optional[int] = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
UpperCAmelCase : str = n
def UpperCAmelCase_ ( self : List[str] , lowercase_ : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
UpperCAmelCase : Optional[Any] = self.dq_store.pop()
self.key_reference.remove(lowercase_ )
else:
self.dq_store.remove(lowercase_ )
self.dq_store.appendleft(lowercase_ )
self.key_reference.add(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> None:
for k in self.dq_store:
print(lowercase_ )
def __repr__( self : Union[str, Any] ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
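
# Added note: the method mangling above (both `refer` and `display` became
# `UpperCAmelCase_`, the second shadowing the first) breaks the demo as written.
# The intended behaviour, sketched with the original names:
#
#     cache = LRUCache(2)
#     cache.refer(1); cache.refer(2); cache.refer(1)  # re-referring 1 makes it most recent
#     cache.refer(3)                                  # evicts 2, the least recently used
#     # cache now holds [3, 1]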
| 695 | 1 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10**9 ):
UpperCAmelCase : Union[str, Any] = 1
UpperCAmelCase : Optional[int] = 2
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : List[Any] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
UpperCAmelCase : Dict = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
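
# Added reference reconstruction (the original variable names were mangled away):
# this mirrors Project Euler 94, summing the perimeters (<= 10**9) of
# "almost equilateral" triangles -- sides (a, a, a +/- 1) -- with integral area,
# generated by a Pell-style recurrence rather than brute force.
def _solution_reference(max_perimeter: int = 10**9) -> int:
    prev_value, value = 1, 2
    i = perimeters_sum = perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum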
| 695 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"gpt-neox-20b": 2048,
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowercase_ : Any=None , lowercase_ : Dict=None , lowercase_ : List[str]=None , lowercase_ : List[Any]="<|endoftext|>" , lowercase_ : List[str]="<|endoftext|>" , lowercase_ : Any="<|endoftext|>" , lowercase_ : List[str]=False , **lowercase_ : Union[str, Any] , ) -> str:
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
UpperCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase_ ) != add_prefix_space:
UpperCAmelCase : Tuple = getattr(lowercase_ , pre_tok_state.pop('type' ) )
UpperCAmelCase : Optional[Any] = add_prefix_space
UpperCAmelCase : Tuple = pre_tok_class(**lowercase_ )
UpperCAmelCase : Any = add_prefix_space
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : Optional[int] = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : "Conversation" ) -> List[int]:
UpperCAmelCase : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_ ) + [self.eos_token_id] )
if len(lowercase_ ) > self.model_max_length:
UpperCAmelCase : int = input_ids[-self.model_max_length :]
return input_ids
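
# Illustrative usage (added sketch; left commented because it needs network
# access to the Hub). GPTNeoXTokenizerFast is the name this class carries in
# the transformers library this file mirrors -- it is mangled to `A_` above.
#
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     ids = tokenizer("Hello world").input_ids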
| 695 | 1 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
# Initialise PyTorch model
UpperCAmelCase : Optional[Any] = BigBirdConfig.from_json_file(UpperCAmelCase_ )
print(F"""Building PyTorch model from configuration: {config}""" )
if is_trivia_qa:
UpperCAmelCase : List[str] = BigBirdForQuestionAnswering(UpperCAmelCase_ )
else:
UpperCAmelCase : List[Any] = BigBirdForPreTraining(UpperCAmelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(UpperCAmelCase_ , UpperCAmelCase_ , is_trivia_qa=UpperCAmelCase_ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
lowercase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
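
# Example invocation (added illustration; the script filename and the paths are
# placeholders):
#
#     python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bigbird_ckpt \
#         --big_bird_config_file ./config.json \
#         --pytorch_dump_path ./pytorch_model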
| 695 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = """openai/whisper-base"""
UpperCAmelCase_ : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
UpperCAmelCase_ : Dict = """transcriber"""
UpperCAmelCase_ : int = WhisperProcessor
UpperCAmelCase_ : Optional[int] = WhisperForConditionalGeneration
UpperCAmelCase_ : Dict = ["""audio"""]
UpperCAmelCase_ : Optional[int] = ["""text"""]
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> Optional[int]:
return self.pre_processor(lowercase_ , return_tensors='pt' ).input_features
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int ) -> List[str]:
return self.model.generate(inputs=lowercase_ )
def UpperCAmelCase_ ( self : str , lowercase_ : List[Any] ) -> List[str]:
return self.pre_processor.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )[0]
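
# Illustrative end-to-end use (added sketch, commented out because it downloads
# the checkpoint). PipelineTool instances are callable, chaining the
# encode -> forward -> decode methods defined above; `audio` stands for any
# waveform the Whisper processor accepts. Upstream transformers ships this
# class as SpeechToTextTool (mangled to `A_` here).
#
#     tool = SpeechToTextTool()
#     transcript = tool(audio)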
| 695 | 1 |
'''simple docstring'''
import os
from math import logaa
def UpperCamelCase( UpperCAmelCase_ = "base_exp.txt" ):
UpperCAmelCase : float = 0
UpperCAmelCase : Any = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) ):
UpperCAmelCase , UpperCAmelCase : int = list(map(UpperCAmelCase_ , line.split(',' ) ) )
if x * logaa(UpperCAmelCase_ ) > largest:
UpperCAmelCase : List[Any] = x * logaa(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = i + 1
return result
if __name__ == "__main__":
print(solution())
| 695 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """whisper"""
UpperCAmelCase_ : Tuple = ["""past_key_values"""]
UpperCAmelCase_ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str , lowercase_ : Any=51_865 , lowercase_ : List[Any]=80 , lowercase_ : int=6 , lowercase_ : Dict=4 , lowercase_ : List[Any]=6 , lowercase_ : Any=4 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : List[Any]=50_257 , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : str="gelu" , lowercase_ : List[str]=256 , lowercase_ : str=0.0 , lowercase_ : Any=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]=1_500 , lowercase_ : List[Any]=448 , lowercase_ : int=50_256 , lowercase_ : Union[str, Any]=50_256 , lowercase_ : List[Any]=50_256 , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=[220, 50_256] , lowercase_ : Tuple=False , lowercase_ : str=256 , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=0.05 , lowercase_ : Any=10 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=10 , lowercase_ : int=0 , lowercase_ : Optional[int]=7 , **lowercase_ : Union[str, Any] , ) -> List[str]:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Any = num_mel_bins
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : str = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_layers
UpperCAmelCase : Any = decoder_attention_heads
UpperCAmelCase : Tuple = decoder_ffn_dim
UpperCAmelCase : List[str] = encoder_ffn_dim
UpperCAmelCase : int = dropout
UpperCAmelCase : int = attention_dropout
UpperCAmelCase : List[Any] = activation_dropout
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Union[str, Any] = init_std
UpperCAmelCase : Dict = encoder_layerdrop
UpperCAmelCase : str = decoder_layerdrop
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : Tuple = max_source_positions
UpperCAmelCase : List[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase : Optional[int] = classifier_proj_size
UpperCAmelCase : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : Optional[Any] = apply_spec_augment
UpperCAmelCase : Optional[Any] = mask_time_prob
UpperCAmelCase : Optional[Any] = mask_time_length
UpperCAmelCase : str = mask_time_min_masks
UpperCAmelCase : List[str] = mask_feature_prob
UpperCAmelCase : Tuple = mask_feature_length
UpperCAmelCase : Optional[int] = mask_feature_min_masks
UpperCAmelCase : str = median_filter_width
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , suppress_tokens=lowercase_ , begin_suppress_tokens=lowercase_ , **lowercase_ , )
class A_ ( _snake_case ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase : Optional[int] = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
UpperCAmelCase : int = {0: 'batch'}
else:
UpperCAmelCase : List[str] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='inputs' )
return common_inputs
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 22_050 , lowercase_ : float = 5.0 , lowercase_ : int = 220 , ) -> Mapping[str, Any]:
UpperCAmelCase : Tuple = OrderedDict()
UpperCAmelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase_ , framework=lowercase_ , sampling_rate=lowercase_ , time_duration=lowercase_ , frequency=lowercase_ , )
UpperCAmelCase : Optional[Any] = encoder_inputs['input_features'].shape[2]
UpperCAmelCase : Tuple = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase : Optional[int] = super().generate_dummy_inputs(
preprocessor.tokenizer , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Dict = encoder_inputs.pop('input_features' )
UpperCAmelCase : List[str] = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase : Union[str, Any] = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def UpperCAmelCase_ ( self : Dict ) -> float:
return 1E-3
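
# Illustrative construction (added sketch; WhisperConfig is the library name of
# the first class above, mangled to `A_`). The defaults reproduce a
# whisper-base-sized model, and any field can be overridden by keyword:
#
#     config = WhisperConfig(num_mel_bins=80, encoder_layers=6, decoder_layers=6)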
| 695 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowercase__ = logging.get_logger(__name__)
@dataclass
class A_ :
'''simple docstring'''
def __init__( self : Any , lowercase_ : List[Any]=False , lowercase_ : int=False , lowercase_ : str=6.0 , lowercase_ : int=None , lowercase_ : List[str]=False , lowercase_ : Dict=False , lowercase_ : str=None , lowercase_ : Any="fp4" , lowercase_ : Optional[Any]=False , **lowercase_ : Union[str, Any] , ) -> int:
UpperCAmelCase : Optional[int] = load_in_abit
UpperCAmelCase : Union[str, Any] = load_in_abit
UpperCAmelCase : Tuple = llm_inta_threshold
UpperCAmelCase : int = llm_inta_skip_modules
UpperCAmelCase : str = llm_inta_enable_fpaa_cpu_offload
UpperCAmelCase : str = llm_inta_has_fpaa_weight
UpperCAmelCase : List[Any] = bnb_abit_quant_type
UpperCAmelCase : Optional[int] = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
UpperCAmelCase : Any = torch.floataa
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Optional[Any] = getattr(lowercase_ , lowercase_ )
elif isinstance(lowercase_ , torch.dtype ):
UpperCAmelCase : List[Any] = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def UpperCAmelCase_ ( self : int ) -> Tuple:
if not isinstance(self.llm_inta_threshold , lowercase_ ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , lowercase_ ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , lowercase_ ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , lowercase_ ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , lowercase_ ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , lowercase_ ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def UpperCAmelCase_ ( self : int ) -> Any:
return self.load_in_abit or self.load_in_abit
def UpperCAmelCase_ ( self : int ) -> List[str]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def UpperCAmelCase_ ( cls : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , **lowercase_ : List[str] ) -> Optional[Any]:
UpperCAmelCase : int = cls(**lowercase_ )
UpperCAmelCase : Dict = []
for key, value in kwargs.items():
if hasattr(lowercase_ , lowercase_ ):
setattr(lowercase_ , lowercase_ , lowercase_ )
to_remove.append(lowercase_ )
for key in to_remove:
kwargs.pop(lowercase_ , lowercase_ )
if return_unused_kwargs:
return config, kwargs
else:
return config
def UpperCAmelCase_ ( self : Any , lowercase_ : Union[str, os.PathLike] ) -> List[str]:
with open(lowercase_ , 'w' , encoding='utf-8' ) as writer:
UpperCAmelCase : Tuple = self.to_dict()
UpperCAmelCase : int = json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + '\n'
writer.write(lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> Dict[str, Any]:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[Any] = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self : Union[str, Any] ) -> Optional[Any]:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def UpperCAmelCase_ ( self : int , lowercase_ : bool = True ) -> str:
if use_diff is True:
UpperCAmelCase : str = self.to_diff_dict()
else:
UpperCAmelCase : List[str] = self.to_dict()
return json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + "\n"
def UpperCAmelCase_ ( self : Dict ) -> Dict[str, Any]:
UpperCAmelCase : List[str] = self.to_dict()
# get the default config dict
UpperCAmelCase : Any = BitsAndBytesConfig().to_dict()
UpperCAmelCase : Optional[Any] = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
UpperCAmelCase : Any = value
return serializable_config_dict
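
# Illustrative 4-bit setup (added sketch, mirroring the documented
# BitsAndBytesConfig usage in transformers; note the constructor parameters are
# mangled above, e.g. both load_in_8bit and load_in_4bit became load_in_abit):
#
#     import torch
#     quant_config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_quant_type="nf4",
#         bnb_4bit_use_double_quant=True,
#         bnb_4bit_compute_dtype=torch.bfloat16,
#     )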
| 695 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
UpperCAmelCase : Any = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
UpperCAmelCase : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
UpperCAmelCase : Dict = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase : Dict = torch.cuda.device_count()
UpperCAmelCase : List[Any] = num_gpus
UpperCAmelCase : List[Any] = False
if num_gpus > 1:
UpperCAmelCase : Tuple = 'MULTI_GPU'
else:
UpperCAmelCase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
UpperCAmelCase : Optional[int] = torch.xpu.device_count()
UpperCAmelCase : Optional[int] = num_xpus
UpperCAmelCase : Any = False
if num_xpus > 1:
UpperCAmelCase : Tuple = 'MULTI_XPU'
else:
UpperCAmelCase : str = 'NO'
elif is_npu_available():
UpperCAmelCase : Optional[int] = torch.npu.device_count()
UpperCAmelCase : str = num_npus
UpperCAmelCase : int = False
if num_npus > 1:
UpperCAmelCase : int = 'MULTI_NPU'
else:
UpperCAmelCase : List[str] = 'NO'
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = True
UpperCAmelCase : str = 1
UpperCAmelCase : str = 'NO'
UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = parser.add_parser('default' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ )
parser.add_argument(
'--config_file' , default=UpperCAmelCase_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=UpperCAmelCase_ , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=UpperCAmelCase_ )
return parser
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 695 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = """trajectory_transformer"""
UpperCAmelCase_ : Any = ["""past_key_values"""]
UpperCAmelCase_ : Any = {
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : List[Any] , lowercase_ : List[Any]=100 , lowercase_ : Dict=5 , lowercase_ : Optional[int]=1 , lowercase_ : int=1 , lowercase_ : int=249 , lowercase_ : Any=6 , lowercase_ : Tuple=17 , lowercase_ : Dict=25 , lowercase_ : List[Any]=4 , lowercase_ : Tuple=4 , lowercase_ : Dict=128 , lowercase_ : List[str]=0.1 , lowercase_ : int=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : int=0.0006 , lowercase_ : int=512 , lowercase_ : int=0.02 , lowercase_ : Dict=1E-12 , lowercase_ : List[str]=1 , lowercase_ : Union[str, Any]=True , lowercase_ : Tuple=1 , lowercase_ : Any=50_256 , lowercase_ : Optional[Any]=50_256 , **lowercase_ : Any , ) -> int:
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : Optional[Any] = action_weight
UpperCAmelCase : Tuple = reward_weight
UpperCAmelCase : Union[str, Any] = value_weight
UpperCAmelCase : str = max_position_embeddings
UpperCAmelCase : Tuple = block_size
UpperCAmelCase : Optional[int] = action_dim
UpperCAmelCase : Tuple = observation_dim
UpperCAmelCase : Any = transition_dim
UpperCAmelCase : Dict = learning_rate
UpperCAmelCase : Any = n_layer
UpperCAmelCase : str = n_head
UpperCAmelCase : Optional[int] = n_embd
UpperCAmelCase : List[str] = embd_pdrop
UpperCAmelCase : Union[str, Any] = attn_pdrop
UpperCAmelCase : str = resid_pdrop
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Dict = layer_norm_eps
UpperCAmelCase : Optional[Any] = kaiming_initializer_range
UpperCAmelCase : Union[str, Any] = use_cache
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
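
# Illustrative construction (added sketch; TrajectoryTransformerConfig is the
# library name of the class mangled to `A_` above). The defaults
# (action_dim=6, observation_dim=17) appear to match the HalfCheetah
# environment referenced in the checkpoint URL:
#
#     config = TrajectoryTransformerConfig(action_dim=6, observation_dim=17)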
| 695 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : List[Any] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> None:
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 695 | 1 |
'''simple docstring'''
from timeit import timeit
lowercase__ = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : str = len(UpperCAmelCase_ ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : List[Any] = len(UpperCAmelCase_ ) // 2
UpperCAmelCase : int = len(UpperCAmelCase_ )
    # We only need to traverse up to half the length of the string,
    # since the i-th element from the end can be reached through the
    # i-th index from the front: s[i] == s[n - i - 1].
    # e.g. [0,1,2,3,4,5] => index 4 is the mirror of index 1
    # (i == n - i - 1), where n is the length of the string.
return all(s[i] == s[n - i - 1] for i in range(UpperCAmelCase_ ) )
def UpperCamelCase( UpperCAmelCase_ ):
    # Base case: only strings of length 0 or 1 are trivially palindromes;
    # a two-character string such as "ab" must still be compared below.
    if len(UpperCAmelCase_ ) <= 1:
        return True
if s[0] == s[len(UpperCAmelCase_ ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def UpperCamelCase( UpperCAmelCase_ ):
return s == s[::-1]
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : str = F"""all({name}(key) is value for key, value in test_data.items())"""
UpperCAmelCase : Optional[int] = F"""from __main__ import test_data, {name}"""
    UpperCAmelCase : List[str] = 500_000
UpperCAmelCase : List[Any] = timeit(stmt=UpperCAmelCase_ , setup=UpperCAmelCase_ , number=UpperCAmelCase_ )
print(F"""{name:<35} finished {number:,} runs in {result:.5f} seconds""" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'''{key:21} {value}''')
print("a man a plan a canal panama")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("is_palindrome_slice")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("is_palindrome")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("is_palindrome_recursive")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("is_palindrome_traversal")
| 695 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowercase__ = logging.getLogger(__name__)
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : str
UpperCAmelCase_ : str
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[int]
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[Union[int, float]] = None
UpperCAmelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]:
UpperCAmelCase : Dict = hans_processors[task]()
UpperCAmelCase : List[Any] = os.path.join(
lowercase_ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase : int = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase : Tuple = torch.load(lowercase_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase : int = (
processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
)
logger.info('Training examples: %s' , len(lowercase_ ) )
UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
logger.info('Saving features into cached file %s' , lowercase_ )
torch.save(self.features , lowercase_ )
def __len__( self : Union[str, Any] ) -> str:
return len(self.features )
def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : Tuple , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : int=False , lowercase_ : bool = False , ) -> Union[str, Any]:
UpperCAmelCase : int = hans_processors[task]()
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Any = label_list
UpperCAmelCase : str = processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
UpperCAmelCase : int = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
lowercase_ , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return self.dataset
def __len__( self : Tuple ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
return self.label_list
class A_ ( _snake_case ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int , lowercase_ : Optional[int] ) -> Any:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_train_set.txt' ) ) , 'train' )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> List[str]:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
return ["contradiction", "entailment", "neutral"]
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str ) -> Dict:
UpperCAmelCase : Union[str, Any] = []
for i, line in enumerate(lowercase_ ):
if i == 0:
continue
UpperCAmelCase : Tuple = '%s-%s' % (set_type, line[0])
UpperCAmelCase : Tuple = line[5]
UpperCAmelCase : Dict = line[6]
UpperCAmelCase : Optional[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7]
UpperCAmelCase : Optional[Any] = line[0]
examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
return examples
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
UpperCAmelCase : List[str] = {label: i for i, label in enumerate(UpperCAmelCase_ )}
UpperCAmelCase : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase_ ) , desc='convert examples to features' ):
        if ex_index % 10_000 == 0:
logger.info('Writing example %d' % (ex_index) )
UpperCAmelCase : int = tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , truncation=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , )
UpperCAmelCase : List[str] = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase : Any = int(example.pairID )
features.append(InputFeatures(**UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
lowercase__ = {
"hans": 3,
}
lowercase__ = {
"hans": HansProcessor,
}
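
# Illustrative dataset construction (added sketch; the path is a placeholder and
# the PyTorch branch above must be active). HansDataset is the upstream name of
# the torch Dataset subclass mangled to `A_`:
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     train_set = HansDataset("./hans_data", tokenizer, task="hans", max_seq_length=128)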
| 695 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase__ = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class A_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCAmelCase_ ( cls : int ) -> Dict:
UpperCAmelCase : Union[str, Any] = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def UpperCAmelCase_ ( cls : Tuple ) -> Any:
try:
delete_repo(token=cls._token , repo_id='test-model-flax' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-model-flax-org' )
except HTTPError:
pass
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
UpperCAmelCase : Dict = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase : str = FlaxBertModel(lowercase_ )
model.push_to_hub('test-model-flax' , use_auth_token=self._token )
UpperCAmelCase : Any = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
UpperCAmelCase : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase : int = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase : List[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ , 1E-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='test-model-flax' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase_ , repo_id='test-model-flax' , push_to_hub=lowercase_ , use_auth_token=self._token )
UpperCAmelCase : Optional[int] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
UpperCAmelCase : List[str] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase : Union[str, Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ , 1E-3 , msg=f"""{key} not identical""" )
def UpperCAmelCase_ ( self : str ) -> List[Any]:
UpperCAmelCase : str = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase : Optional[Any] = FlaxBertModel(lowercase_ )
model.push_to_hub('valid_org/test-model-flax-org' , use_auth_token=self._token )
UpperCAmelCase : str = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
UpperCAmelCase : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase : str = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ , 1E-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-model-flax-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowercase_ , repo_id='valid_org/test-model-flax-org' , push_to_hub=lowercase_ , use_auth_token=self._token )
UpperCAmelCase : Tuple = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
UpperCAmelCase : Union[str, Any] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase : List[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ , 1E-3 , msg=f"""{key} not identical""" )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : int = True
UpperCAmelCase : str = flatten_dict(modela.params )
UpperCAmelCase : Tuple = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
UpperCAmelCase : Optional[int] = False
return models_are_equal
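
# Added reference reconstruction: the mangling above collapsed the two model
# arguments into one name, so the helper ends up comparing a model with itself.
# The intended comparison, with readable names (np and flatten_dict are imported
# at the top of this file):
def _check_models_equal_reference(model_a, model_b, tol: float = 1e-4) -> bool:
    params_a = flatten_dict(model_a.params)
    params_b = flatten_dict(model_b.params)
    return all(float(np.sum(np.abs(params_a[key] - params_b[key]))) <= tol for key in params_a)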
@require_flax
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
UpperCAmelCase : str = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
UpperCAmelCase : List[str] = FlaxBertModel(lowercase_ )
UpperCAmelCase : List[Any] = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowercase_ , lowercase_ ) )
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = FlaxBertModel.from_pretrained(lowercase_ )
UpperCAmelCase : int = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_ )
self.assertTrue(check_models_equal(lowercase_ , lowercase_ ) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
UpperCAmelCase : int = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
UpperCAmelCase : Optional[int] = FlaxBertModel(lowercase_ )
UpperCAmelCase : int = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowercase_ , lowercase_ ) , max_shard_size='10KB' )
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = FlaxBertModel.from_pretrained(lowercase_ )
UpperCAmelCase : List[str] = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_ )
self.assertTrue(check_models_equal(lowercase_ , lowercase_ ) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
UpperCAmelCase : List[Any] = 'bert'
UpperCAmelCase : Any = 'hf-internal-testing/tiny-random-bert-subfolder'
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Optional[int] = FlaxBertModel.from_pretrained(lowercase_ )
UpperCAmelCase : List[Any] = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> int:
UpperCAmelCase : Any = 'bert'
UpperCAmelCase : str = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Dict = FlaxBertModel.from_pretrained(lowercase_ )
UpperCAmelCase : str = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_ )
self.assertIsNotNone(lowercase_ )
| 695 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10_00 ):
UpperCAmelCase , UpperCAmelCase : Any = 1, 1
UpperCAmelCase : Any = []
for i in range(1 , n + 1 ):
UpperCAmelCase : Tuple = prev_numerator + 2 * prev_denominator
UpperCAmelCase : Any = prev_numerator + prev_denominator
if len(str(UpperCAmelCase_ ) ) > len(str(UpperCAmelCase_ ) ):
result.append(UpperCAmelCase_ )
UpperCAmelCase : Dict = numerator
UpperCAmelCase : Dict = denominator
return len(UpperCAmelCase_ )
if __name__ == "__main__":
print(f'''{solution() = }''')
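
# Added reference reconstruction (the mangling above makes the digit-length
# comparison compare a value with itself, so it never fires): Project Euler 57
# counts, among the first 1000 expansions of the continued fraction for sqrt(2),
# those whose numerator has more digits than its denominator.
def _solution_reference(n: int = 1_000) -> int:
    numerator, denominator = 1, 1
    count = 0
    for _ in range(1, n + 1):
        numerator, denominator = numerator + 2 * denominator, numerator + denominator
        if len(str(numerator)) > len(str(denominator)):
            count += 1
    return count  # 153 for n = 1000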
| 695 | 1 |
'''simple docstring'''
import pytest
lowercase__ = "__dummy_dataset1__"
lowercase__ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def UpperCamelCase( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def UpperCamelCase( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[int] = dataset_loading_script_name
UpperCAmelCase : Tuple = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=UpperCAmelCase_ )
UpperCAmelCase : Any = script_dir / F"""{script_name}.py"""
with open(UpperCAmelCase_ , 'w' ) as f:
f.write(UpperCAmelCase_ )
return str(UpperCAmelCase_ )
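
# Illustrative use in a test (added sketch; the fixture names are mangled above --
# upstream they are dataset_loading_script_name / _code / _dir, and pytest
# injects them by parameter name):
#
#     def test_dummy_dataset(dataset_loading_script_dir):
#         from datasets import load_dataset
#         ds = load_dataset(dataset_loading_script_dir)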
| 695 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10_00 ):
UpperCAmelCase : List[Any] = 2**power
UpperCAmelCase : List[Any] = 0
while n:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
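
# Equivalent one-liner (added for comparison): casting to a string replaces the
# explicit divmod loop above.
def _solution_via_str(power: int = 1_000) -> int:
    return sum(int(digit) for digit in str(2**power))  # 1366 for power = 1000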
| 695 | 1 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowercase__ = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowercase__ = "main"
# Default branch name
lowercase__ = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
lowercase__ = "aaaaaaa"
# This commit does not exist, so we should 404.
lowercase__ = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
lowercase__ = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def UpperCamelCase( ):
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def UpperCamelCase( ):
print('Bonjour!' )
yield
print('Au revoir!' )
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class A_ ( unittest.TestCase ):
'''simple docstring'''
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int ) -> str:
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def UpperCAmelCase_ ( self : Dict , lowercase_ : Union[str, Any] ) -> int:
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : Union[str, Any] ) -> List[str]:
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
self.assertEqual(find_labels(lowercase_ ) , ['labels'] )
self.assertEqual(find_labels(lowercase_ ) , ['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(lowercase_ ) , ['start_positions', 'end_positions'] )
class A_ ( _snake_case ):
'''simple docstring'''
pass
self.assertEqual(find_labels(lowercase_ ) , ['labels'] )
@require_tf
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
self.assertEqual(find_labels(lowercase_ ) , ['labels'] )
self.assertEqual(find_labels(lowercase_ ) , ['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(lowercase_ ) , ['start_positions', 'end_positions'] )
class A_ ( _snake_case ):
'''simple docstring'''
pass
self.assertEqual(find_labels(lowercase_ ) , ['labels'] )
@require_flax
def UpperCAmelCase_ ( self : str ) -> int:
# Flax models don't have labels
self.assertEqual(find_labels(lowercase_ ) , [] )
self.assertEqual(find_labels(lowercase_ ) , [] )
self.assertEqual(find_labels(lowercase_ ) , [] )
class A_ ( _snake_case ):
'''simple docstring'''
pass
self.assertEqual(find_labels(lowercase_ ) , [] )
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = """blip_2_vision_model"""
def __init__( self : List[str] , lowercase_ : int=1_408 , lowercase_ : Tuple=6_144 , lowercase_ : Dict=39 , lowercase_ : Optional[int]=16 , lowercase_ : str=224 , lowercase_ : Any=14 , lowercase_ : int="gelu" , lowercase_ : int=0.0_0001 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=1E-10 , lowercase_ : List[str]=True , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
super().__init__(**lowercase_ )
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : str = patch_size
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = qkv_bias
@classmethod
def UpperCAmelCase_ ( cls : List[str] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip_2_qformer"""
def __init__( self : Tuple , lowercase_ : Union[str, Any]=30_522 , lowercase_ : int=768 , lowercase_ : Dict=12 , lowercase_ : Dict=12 , lowercase_ : int=3_072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=1E-12 , lowercase_ : str=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : Optional[int]=2 , lowercase_ : str=1_408 , **lowercase_ : Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = layer_norm_eps
UpperCAmelCase : Dict = position_embedding_type
UpperCAmelCase : Any = cross_attention_frequency
UpperCAmelCase : Any = encoder_hidden_size
@classmethod
def UpperCAmelCase_ ( cls : List[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip-2"""
UpperCAmelCase_ : Any = True
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : Dict=32 , **lowercase_ : Union[str, Any] ) -> Any:
super().__init__(**lowercase_ )
if vision_config is None:
UpperCAmelCase : Union[str, Any] = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
UpperCAmelCase : int = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
UpperCAmelCase : Dict = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
UpperCAmelCase : str = BlipaVisionConfig(**lowercase_ )
UpperCAmelCase : str = BlipaQFormerConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
UpperCAmelCase : int = CONFIG_MAPPING[text_model_type](**lowercase_ )
UpperCAmelCase : Optional[int] = self.text_config.tie_word_embeddings
UpperCAmelCase : Dict = self.text_config.is_encoder_decoder
UpperCAmelCase : Tuple = num_query_tokens
UpperCAmelCase : Tuple = self.vision_config.hidden_size
UpperCAmelCase : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase : Union[str, Any] = 1.0
UpperCAmelCase : Union[str, Any] = 0.02
@classmethod
def UpperCAmelCase_ ( cls : Dict , lowercase_ : BlipaVisionConfig , lowercase_ : BlipaQFormerConfig , lowercase_ : PretrainedConfig , **lowercase_ : List[Any] , ) -> Tuple:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
UpperCAmelCase : Optional[int] = self.qformer_config.to_dict()
UpperCAmelCase : List[str] = self.text_config.to_dict()
UpperCAmelCase : str = self.__class__.model_type
return output
| 695 | 1 |
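# A minimal sketch of the composite-config pattern used in the Blip2Config
# snippet above: a top-level config wraps vision/qformer sub-configs, falls
# back to defaults when a sub-config dict is missing, and round-trips through
# ``to_dict``. Class and field names here are illustrative stand-ins, not the
# exact transformers API.
import copy


class SubConfig:
    def __init__(self, **kwargs):
        self.hidden_size = kwargs.get("hidden_size", 768)

    def to_dict(self):
        return copy.deepcopy(self.__dict__)


class CompositeConfig:
    model_type = "blip-2"

    def __init__(self, vision_config=None, qformer_config=None, num_query_tokens=32):
        # Mirror the ``if vision_config is None`` branches above: default to {}.
        self.vision_config = SubConfig(**(vision_config or {}))
        self.qformer_config = SubConfig(**(qformer_config or {}))
        self.num_query_tokens = num_query_tokens

    def to_dict(self):
        # Serialize sub-configs to plain dicts, as the ``to_dict`` override above does.
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["model_type"] = self.model_type
        return output


config = CompositeConfig(vision_config={"hidden_size": 1_408})
assert config.to_dict()["vision_config"]["hidden_size"] == 1_408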
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowercase__ = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
UpperCAmelCase : Union[str, Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
UpperCAmelCase : int = self.transformer_dir
shutil.copy(
os.path.join(lowercase_ , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : Optional[Any] = 'src/transformers'
shutil.rmtree(self.transformer_dir )
def UpperCAmelCase_ ( self : int , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any]=None ) -> Tuple:
UpperCAmelCase : List[str] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
UpperCAmelCase : Optional[int] = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
UpperCAmelCase : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
UpperCAmelCase : Union[str, Any] = black.format_str(lowercase_ , mode=lowercase_ )
UpperCAmelCase : str = os.path.join(self.transformer_dir , 'new_code.py' )
with open(lowercase_ , 'w' , newline='\n' ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , 'r' ) as f:
self.assertTrue(f.read() , lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
UpperCAmelCase : int = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
# Base copy consistency
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , lowercase_ ) , )
# Copy consistency with a really long name
UpperCAmelCase : Optional[int] = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , f"""{long_class_name}LMPredictionHead""" , re.sub('Bert' , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , lowercase_ , overwrite_result=re.sub('Bert' , 'TestModel' , lowercase_ ) , )
def UpperCAmelCase_ ( self : Dict ) -> Any:
UpperCAmelCase : Dict = check_copies.LOCALIZED_READMES['README_zh-hans.md']
UpperCAmelCase : Tuple = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
UpperCAmelCase : Dict = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
UpperCAmelCase : Optional[int] = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
UpperCAmelCase , UpperCAmelCase : List[str] = check_copies.convert_to_localized_md(
lowercase_ , lowercase_ , localized_readme['format_model_list'] )
self.assertFalse(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
UpperCAmelCase , UpperCAmelCase : Optional[int] = check_copies.convert_to_localized_md(
lowercase_ , lowercase_ , localized_readme['format_model_list'] )
        # Check whether the number of models matches the English README.md after conversion.
self.assertTrue(lowercase_ )
UpperCAmelCase : str = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
UpperCAmelCase : Union[str, Any] = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
UpperCAmelCase : Union[str, Any] = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = check_copies.convert_to_localized_md(
lowercase_ , lowercase_ , localized_readme['format_model_list'] )
# Check if the model link is synchronized.
self.assertEqual(lowercase_ , lowercase_ )
| 695 |
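# A minimal sketch of the in-memory black round-trip that the copy-consistency
# test above relies on: format a code string with ``black.format_str`` and
# compare it against the expected text. Requires ``pip install black``; the
# sample source string is illustrative.
import black

source = "def f(x,y):\n    return x+y\n"
formatted = black.format_str(source, mode=black.Mode(line_length=119))
assert formatted == "def f(x, y):\n    return x + y\n"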
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowercase__ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
lowercase__ = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict , lowercase_ : Dict ) -> Tuple:
return FSMTTokenizer.from_pretrained(lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> Tuple:
UpperCAmelCase : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(lowercase_ ).to(lowercase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def UpperCAmelCase_ ( self : List[str] , lowercase_ : int , lowercase_ : Any ) -> Optional[int]:
        # note: this test does not measure peak translation quality since it only evaluates a small batch,
        # but it should be enough to detect a regression in output quality
UpperCAmelCase : List[str] = f"""facebook/wmt19-{pair}"""
UpperCAmelCase : Optional[int] = self.get_tokenizer(lowercase_ )
UpperCAmelCase : int = self.get_model(lowercase_ )
UpperCAmelCase : List[Any] = bleu_data[pair]['src']
UpperCAmelCase : Optional[int] = bleu_data[pair]['tgt']
UpperCAmelCase : Any = tokenizer(lowercase_ , return_tensors='pt' , truncation=lowercase_ , padding='longest' ).to(lowercase_ )
UpperCAmelCase : List[Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase : List[Any] = tokenizer.batch_decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
UpperCAmelCase : Any = calculate_bleu(lowercase_ , lowercase_ )
print(lowercase_ )
self.assertGreaterEqual(scores['bleu'] , lowercase_ )
| 695 | 1 |
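# A hedged sketch of the generate-and-score loop from the FSMT test above,
# scoring with ``sacrebleu`` directly instead of the repo-local
# ``calculate_bleu`` helper. The source sentence and reference translation
# are placeholders, and running this downloads pretrained weights.
import sacrebleu
import torch
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-de"  # one of the pairs exercised above
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

src = ["Machine translation is useful."]
refs = ["Maschinelle Übersetzung ist nützlich."]  # placeholder reference

batch = tokenizer(src, return_tensors="pt", truncation=True, padding="longest")
with torch.no_grad():
    outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
hypotheses = tokenizer.batch_decode(outputs, skip_special_tokens=True)
print(sacrebleu.corpus_bleu(hypotheses, [refs]).score)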
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return [sentence[i : i + ngram_size] for i in range(len(UpperCAmelCase_ ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 695 |
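# The n-gram helper above, restated with readable names (the names are mine):
# it is a fixed-size sliding window over the input sequence, built by slicing.
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


assert create_ngram("hello", 3) == ["hel", "ell", "llo"]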
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = d_kv
UpperCAmelCase : Any = d_ff
UpperCAmelCase : List[str] = num_layers
UpperCAmelCase : str = num_heads
UpperCAmelCase : List[Any] = relative_attention_num_buckets
UpperCAmelCase : Tuple = relative_attention_max_distance
UpperCAmelCase : str = dropout_rate
UpperCAmelCase : Optional[int] = layer_norm_epsilon
UpperCAmelCase : int = initializer_factor
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : Union[str, Any] = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase : List[str] = dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = """pix2struct_vision_model"""
def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Any = patch_embed_hidden_size
UpperCAmelCase : Optional[int] = d_ff
UpperCAmelCase : Dict = dropout_rate
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : str = initializer_factor
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Union[str, Any] = dense_act_fn
UpperCAmelCase : Dict = seq_len
UpperCAmelCase : Optional[int] = relative_attention_num_buckets
UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
UpperCAmelCase : str = d_kv
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """pix2struct"""
UpperCAmelCase_ : Dict = True
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
if text_config is None:
UpperCAmelCase : Optional[int] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
UpperCAmelCase : str = self.text_config.pad_token_id
UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
UpperCAmelCase : Union[str, Any] = initializer_factor
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : str = is_vqa
@classmethod
def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.text_config.to_dict()
UpperCAmelCase : Dict = self.vision_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 695 | 1 |
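# A minimal sketch of the ``from_pretrained`` override repeated in the
# Pix2Struct config classes above: when loading from a composite config dict,
# extract the matching sub-config and warn on a model-type mismatch. Function
# and key names are illustrative, not the exact transformers internals.
import logging

logger = logging.getLogger(__name__)


def extract_sub_config(config_dict: dict, composite_type: str, sub_key: str, expected_type: str) -> dict:
    # If the dict describes the composite model, descend into the sub-config.
    if config_dict.get("model_type") == composite_type:
        config_dict = config_dict[sub_key]
    if "model_type" in config_dict and config_dict["model_type"] != expected_type:
        logger.warning(
            "You are using a model of type %s to instantiate a model of type %s. "
            "This is not supported for all configurations of models and can yield errors.",
            config_dict["model_type"],
            expected_type,
        )
    return config_dict


full = {"model_type": "pix2struct", "text_config": {"model_type": "pix2struct_text_model", "d_ff": 2_048}}
assert extract_sub_config(full, "pix2struct", "text_config", "pix2struct_text_model")["d_ff"] == 2_048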
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Tuple = 0
UpperCAmelCase : Any = len(UpperCAmelCase_ ) # No of vertices in graph
UpperCAmelCase : int = [0] * n
UpperCAmelCase : List[str] = [False] * n
def dfs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Any = True
UpperCAmelCase : List[str] = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , id_ )
UpperCAmelCase : List[str] = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
UpperCAmelCase : Union[str, Any] = min(low[at] , low[to] )
UpperCAmelCase : list[tuple[int, int]] = []
for i in range(UpperCAmelCase_ ):
if not visited[i]:
dfs(UpperCAmelCase_ , -1 , UpperCAmelCase_ , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 |
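# A readable restatement of the bridge finder above (the names are mine), plus
# a check on the first test graph. A tree edge (at, to) is a bridge exactly
# when no back edge from ``to``'s subtree reaches ``at`` or an earlier vertex,
# i.e. when visit_id[at] < low[to].
def find_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    n = len(graph)
    visit_id = [0] * n  # DFS discovery order of each vertex
    low = [0] * n       # lowest discovery id reachable from the subtree
    visited = [False] * n
    bridges: list[tuple[int, int]] = []
    counter = 0

    def dfs(at: int, parent: int) -> None:
        nonlocal counter
        visited[at] = True
        visit_id[at] = low[at] = counter
        counter += 1
        for to in graph[at]:
            if to == parent:
                continue
            if not visited[to]:
                dfs(to, at)
                low[at] = min(low[at], low[to])
                if visit_id[at] < low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # Back edge: it can lower ``at``'s low-link but is never a bridge.
                low[at] = min(low[at], visit_id[to])

    for vertex in range(n):
        if not visited[vertex]:
            dfs(vertex, -1)
    return bridges


graph = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3],
         5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
assert sorted(find_bridges(graph)) == [(2, 3), (2, 5), (3, 4)]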
'''simple docstring'''
import baseaa
def UpperCamelCase( UpperCAmelCase_ ):
return baseaa.baaencode(string.encode('utf-8' ) )
def UpperCamelCase( UpperCAmelCase_ ):
return baseaa.baadecode(UpperCAmelCase_ ).decode('utf-8' )
if __name__ == "__main__":
lowercase__ = "Hello World!"
lowercase__ = baseaa_encode(test)
print(encoded)
lowercase__ = baseaa_decode(encoded)
print(decoded)
| 695 | 1 |
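# A runnable equivalent of the encode/decode pair above, written against the
# standard-library ``base64`` Base85 helpers that the obfuscated
# ``baseaa``/``baaencode`` names appear to correspond to.
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(encoded: bytes) -> str:
    return base64.b85decode(encoded).decode("utf-8")


encoded = base85_encode("Hello World!")
print(encoded)                  # b'NM&qnZy;B1a%^NF'
print(base85_decode(encoded))   # Hello World!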