code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1
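# Sketch of what the first sample does: load a T5X (JAX) T5/LongT5 checkpoint as a
# nested dict of arrays, copy each weight into the corresponding entry of a Flax
# model's parameter tree, then save the result with `save_pretrained`.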
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_tax_checkpoint_to_flax( t5x_checkpoint_path , config_name , flax_dump_folder_path ):
config = AutoConfig.from_pretrained(config_name )
flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config )
tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
split_mlp_wi = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__SCREAMING_SNAKE_CASE : int = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__SCREAMING_SNAKE_CASE : int = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
layer_name = F'''layers_{str(layer_index )}'''
# Self-Attention
tax_attention_key = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
tax_attention_out = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
tax_attention_query = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
tax_attention_value = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
tax_global_layer_norm = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
tax_attention_layer_norm = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
tax_mlp_wi_0 = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
tax_mlp_wi_1 = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
tax_mlp_wi = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
tax_mlp_wo = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
tax_mlp_layer_norm = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
flax_model_encoder_layer_block = flax_model.params['''encoder''']['''block'''][str(layer_index )]['''layer''']
__SCREAMING_SNAKE_CASE : Optional[Any] = tax_attention_key
__SCREAMING_SNAKE_CASE : Optional[Any] = tax_attention_out
__SCREAMING_SNAKE_CASE : Optional[Any] = tax_attention_query
__SCREAMING_SNAKE_CASE : Dict = tax_attention_value
__SCREAMING_SNAKE_CASE : Dict = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__SCREAMING_SNAKE_CASE : List[Any] = tax_global_layer_norm
if split_mlp_wi:
__SCREAMING_SNAKE_CASE : Optional[Any] = tax_mlp_wi_0
__SCREAMING_SNAKE_CASE : Tuple = tax_mlp_wi_1
else:
__SCREAMING_SNAKE_CASE : int = tax_mlp_wi
__SCREAMING_SNAKE_CASE : int = tax_mlp_wo
__SCREAMING_SNAKE_CASE : Dict = tax_mlp_layer_norm
__SCREAMING_SNAKE_CASE : Any = flax_model_encoder_layer_block
# Only for layer 0:
tax_encoder_rel_embedding = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
__SCREAMING_SNAKE_CASE : Union[str, Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
tax_encoder_global_rel_embedding = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
__SCREAMING_SNAKE_CASE : str = tax_encoder_global_rel_embedding
# Assigning
tax_encoder_norm = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
__SCREAMING_SNAKE_CASE : List[str] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
layer_name = F'''layers_{str(layer_index )}'''
# Self-Attention
tax_attention_key = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
tax_attention_out = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
tax_attention_query = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
tax_attention_value = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
tax_pre_attention_layer_norm = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm''']['''scale''']
# Encoder-Decoder-Attention
tax_enc_dec_attention_module = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
tax_enc_dec_attention_key = tax_enc_dec_attention_module['''key''']['''kernel''']
tax_enc_dec_attention_out = tax_enc_dec_attention_module['''out''']['''kernel''']
tax_enc_dec_attention_query = tax_enc_dec_attention_module['''query''']['''kernel''']
tax_enc_dec_attention_value = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
tax_cross_layer_norm = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
tax_mlp_wi_0 = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
tax_mlp_wi_1 = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
tax_mlp_wi = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
tax_mlp_wo = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
tax_mlp_layer_norm = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
flax_model_decoder_layer_block = flax_model.params['''decoder''']['''block'''][str(layer_index )]['''layer''']
__SCREAMING_SNAKE_CASE : int = tax_attention_key
__SCREAMING_SNAKE_CASE : List[str] = tax_attention_out
__SCREAMING_SNAKE_CASE : Any = tax_attention_query
__SCREAMING_SNAKE_CASE : Union[str, Any] = tax_attention_value
__SCREAMING_SNAKE_CASE : Dict = tax_pre_attention_layer_norm
__SCREAMING_SNAKE_CASE : List[Any] = tax_enc_dec_attention_key
__SCREAMING_SNAKE_CASE : int = tax_enc_dec_attention_out
__SCREAMING_SNAKE_CASE : Union[str, Any] = tax_enc_dec_attention_query
__SCREAMING_SNAKE_CASE : Optional[Any] = tax_enc_dec_attention_value
__SCREAMING_SNAKE_CASE : int = tax_cross_layer_norm
if split_mlp_wi:
__SCREAMING_SNAKE_CASE : int = tax_mlp_wi_0
__SCREAMING_SNAKE_CASE : List[str] = tax_mlp_wi_1
else:
__SCREAMING_SNAKE_CASE : Tuple = tax_mlp_wi
__SCREAMING_SNAKE_CASE : str = tax_mlp_wo
__SCREAMING_SNAKE_CASE : Optional[int] = tax_mlp_layer_norm
__SCREAMING_SNAKE_CASE : Any = flax_model_decoder_layer_block
# Decoder Normalization
tax_decoder_norm = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
__SCREAMING_SNAKE_CASE : List[str] = tax_decoder_norm
# Only for layer 0:
tax_decoder_rel_embedding = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
__SCREAMING_SNAKE_CASE : List[Any] = tax_decoder_rel_embedding
# Token Embeddings
tax_token_embeddings = tax_model['''target''']['''token_embedder''']['''embedding''']
__SCREAMING_SNAKE_CASE : int = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__SCREAMING_SNAKE_CASE : List[Any] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(flax_dump_folder_path )
print('''T5X Model was successfully converted!''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
args = parser.parse_args()
convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig( PretrainedConfig ):
'''simple docstring'''
model_type = '''canine'''
def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=16_384 , type_vocab_size=16 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , bos_token_id=0xe000 , eos_token_id=0xe001 , downsampling_rate=4 , upsampling_kernel_size=4 , num_hash_functions=8 , num_hash_buckets=16_384 , local_transformer_stride=128 , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
# Character config:
self.downsampling_rate = downsampling_rate
self.upsampling_kernel_size = upsampling_kernel_size
self.num_hash_functions = num_hash_functions
self.num_hash_buckets = num_hash_buckets
self.local_transformer_stride = local_transformer_stride
| 696 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
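# `betas_for_alpha_bar` derives a discrete beta schedule from a cumulative-product
# function alpha_bar(t): for each step, beta_i = min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta),
# so the cumulative products of (1 - beta) track alpha_bar.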
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(t ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(t ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
betas = []
for i in range(num_diffusion_timesteps ):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
return torch.tensor(betas , dtype=torch.float32 )
class KDPM2DiscreteScheduler( SchedulerMixin , ConfigMixin ):
'''simple docstring'''
_compatibles = [e.name for e in KarrasDiffusionSchedulers]
order = 2
@register_to_config
def __init__( self , num_train_timesteps: int = 1_000 , beta_start: float = 0.0_0085 , beta_end: float = 0.012 , beta_schedule: str = "linear" , trained_betas: Optional[Union[np.ndarray, List[float]]] = None , prediction_type: str = "epsilon" , timestep_spacing: str = "linspace" , steps_offset: int = 0 , ) -> None:
if trained_betas is not None:
self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
elif beta_schedule == "linear":
self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps )
else:
raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
self.alphas = 1.0 - self.betas
self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
def index_for_timestep( self , timestep , schedule_timesteps=None ):
if schedule_timesteps is None:
schedule_timesteps = self.timesteps
indices = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
pos = 1 if len(indices ) > 1 else 0
else:
timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
pos = self._index_counter[timestep_int]
return indices[pos].item()
@property
def init_noise_sigma( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def scale_model_input( self , sample: torch.FloatTensor , timestep: Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
step_index = self.index_for_timestep(timestep )
if self.state_in_first_order:
sigma = self.sigmas[step_index]
else:
sigma = self.sigmas_interpol[step_index]
sample = sample / ((sigma**2 + 1) ** 0.5)
return sample
def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None , num_train_timesteps: Optional[int] = None , ) -> None:
self.num_inference_steps = num_inference_steps
num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=float )[::-1].copy()
elif self.config.timestep_spacing == "leading":
step_ratio = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
step_ratio = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(float )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
self.log_sigmas = torch.from_numpy(np.log(sigmas ) ).to(device )
sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
sigmas = torch.from_numpy(sigmas ).to(device=device )
# interpolate sigmas
sigmas_interpol = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
self.sigmas_interpol = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(device ).startswith('''mps''' ):
# mps does not support float64
timesteps = torch.from_numpy(timesteps ).to(device , dtype=torch.float32 )
else:
timesteps = torch.from_numpy(timesteps ).to(device )
# interpolate timesteps
timesteps_interpol = self.sigma_to_t(sigmas_interpol ).to(device , dtype=timesteps.dtype )
interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps] )
self.sample = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
self._index_counter = defaultdict(int )
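# `sigma_to_t` inverts the noise schedule: it locates each sigma between two
# entries of `self.log_sigmas` and linearly interpolates the (fractional)
# timestep in log-sigma space.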
def sigma_to_t( self , sigma: torch.FloatTensor ) -> torch.FloatTensor:
# get log sigma
log_sigma = sigma.log()
# get distribution
dists = log_sigma - self.log_sigmas[:, None]
# get sigmas range
low_idx = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
high_idx = low_idx + 1
low = self.log_sigmas[low_idx]
high = self.log_sigmas[high_idx]
# interpolate sigmas
w = (low - log_sigma) / (low - high)
w = w.clamp(0 , 1 )
# transform interpolation to time range
t = (1 - w) * low_idx + w * high_idx
t = t.view(sigma.shape )
return t
@property
def state_in_first_order( self ) -> bool:
return self.sample is None
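# The KDPM2 sampler alternates between a first-order step (current sigma to the
# interpolated midpoint sigma, storing the sample) and a second-order correction
# (stored sample to the next sigma), in the style of DPM-Solver-2 / Karras et al.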
def step( self , model_output: Union[torch.FloatTensor, np.ndarray] , timestep: Union[float, torch.FloatTensor] , sample: Union[torch.FloatTensor, np.ndarray] , return_dict: bool = True , ) -> Union[SchedulerOutput, Tuple]:
step_index = self.index_for_timestep(timestep )
# advance index counter by 1
timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
sigma = self.sigmas[step_index]
sigma_interpol = self.sigmas_interpol[step_index + 1]
sigma_next = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
sigma = self.sigmas[step_index - 1]
sigma_interpol = self.sigmas_interpol[step_index]
sigma_next = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
gamma = 0
sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
pred_original_sample = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
derivative = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
dt = sigma_interpol - sigma_hat
# store for 2nd order step
self.sample = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
derivative = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
dt = sigma_next - sigma_hat
sample = self.sample
self.sample = None
prev_sample = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=prev_sample )
def add_noise( self , original_samples: torch.FloatTensor , noise: torch.FloatTensor , timesteps: torch.FloatTensor , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
# mps does not support float64
schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
schedule_timesteps = self.timesteps.to(original_samples.device )
timesteps = timesteps.to(original_samples.device )
step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
sigma = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
sigma = sigma.unsqueeze(-1 )
noisy_samples = original_samples + noise * sigma
return noisy_samples
def __len__( self :Any ) -> int:
return self.config.num_train_timesteps
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class TransfoXLConfig( PretrainedConfig ):
'''simple docstring'''
model_type = '''transfo-xl'''
keys_to_ignore_at_inference = ['''mems''']
attribute_map = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , vocab_size=267_735 , cutoffs=[20_000, 40_000, 200_000] , d_model=1_024 , d_embed=1_024 , n_head=16 , d_head=64 , d_inner=4_096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1_600 , clamp_len=1_000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1E-5 , eos_token_id=0 , **kwargs , ):
self.vocab_size = vocab_size
self.cutoffs = []
self.cutoffs.extend(cutoffs )
if proj_share_all_but_first:
self.tie_projs = [False] + [True] * len(self.cutoffs )
else:
self.tie_projs = [False] + [False] * len(self.cutoffs )
self.d_model = d_model
self.d_embed = d_embed
self.d_head = d_head
self.d_inner = d_inner
self.div_val = div_val
self.pre_lnorm = pre_lnorm
self.n_layer = n_layer
self.n_head = n_head
self.mem_len = mem_len
self.same_length = same_length
self.attn_type = attn_type
self.clamp_len = clamp_len
self.sample_softmax = sample_softmax
self.adaptive = adaptive
self.dropout = dropout
self.dropatt = dropatt
self.untie_r = untie_r
self.init = init
self.init_range = init_range
self.proj_init_std = proj_init_std
self.init_std = init_std
self.layer_norm_epsilon = layer_norm_epsilon
super().__init__(eos_token_id=eos_token_id , **kwargs )
@property
def max_position_embeddings( self ) -> int:
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def max_position_embeddings( self , value ) -> None:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 696 | 1 |
import math
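# Proth numbers are integers of the form k * 2^n + 1 with k odd and 2^n > k;
# the sequence starts 3, 5, 9, 13, 17, 25, 33, ...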
def proth( number ):
if not isinstance(number , int ):
msg = F'''Input value of [number={number}] must be an integer'''
raise TypeError(msg )
if number < 1:
msg = F'''Input value of [number={number}] must be > 0'''
raise ValueError(msg )
elif number == 1:
return 3
elif number == 2:
return 5
else:
block_index = int(math.log(number // 3 , 2 ) ) + 2
proth_list = [3, 5]
proth_index = 2
increment = 3
for block in range(1 , block_index ):
for _ in range(increment ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
value = 0
try:
value = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Any ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig( PretrainedConfig ):
'''simple docstring'''
model_type = '''megatron-bert'''
def __init__( self , vocab_size=29_056 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
| 696 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
'''simple docstring'''
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=False , vocab_size=19 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ):
config = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=True , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = EsmForProteinFolding(config=config ).float()
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask )
result = model(input_ids )
result = model(input_ids )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class EsmFoldModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
test_mismatched_shapes = False
all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
all_generative_model_classes = ()
pipeline_model_mapping = {} if is_torch_available() else {}
test_sequence_classification_problem_types = False
def setUp( self ):
self.model_tester = EsmFoldModelTester(self )
self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
def test_config( self ):
self.config_tester.run_common_tests()
def test_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip('''Does not support attention outputs''' )
def __magic_name__( self :Any ) -> Optional[Any]:
pass
@unittest.skip
def __magic_name__( self :Optional[Any] ) -> str:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __magic_name__( self :Any ) -> Any:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __magic_name__( self :Optional[int] ) -> List[Any]:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def __magic_name__( self :List[Any] ) -> Dict:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __magic_name__( self :Dict ) -> Any:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __magic_name__( self :Optional[Any] ) -> List[str]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __magic_name__( self :Union[str, Any] ) -> Any:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __magic_name__( self :int ) -> List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __magic_name__( self :Dict ) -> List[Any]:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def __magic_name__( self :Optional[int] ) -> List[Any]:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def __magic_name__( self :str ) -> Optional[Any]:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def __magic_name__( self :Tuple ) -> Tuple:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def __magic_name__( self :Union[str, Any] ) -> Dict:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __magic_name__( self :Union[str, Any] ) -> Any:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __magic_name__( self :int ) -> Union[str, Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __magic_name__( self :int ) -> Union[str, Any]:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def __magic_name__( self :List[Any] ) -> Optional[int]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
'''simple docstring'''
@slow
def __magic_name__( self :List[str] ) -> Optional[int]:
model = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
position_outputs = model(input_ids )['''positions''']
expected_slice = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.float32 )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1E-4 ) )
| 696 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')
DUMMY_CONSTANT ='\n{0} = None\n'
DUMMY_CLASS ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
DUMMY_FUNCTION ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def test_find_backend( self ):
__SCREAMING_SNAKE_CASE : str = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tokenizers''' )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tensorflow_text''' )
__SCREAMING_SNAKE_CASE : Tuple = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers''' )
__SCREAMING_SNAKE_CASE : Any = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tensorflow_text''' )
__SCREAMING_SNAKE_CASE : List[str] = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers_and_vision''' )
def test_read_init( self ):
objects = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCAmelCase__ )
self.assertIn('''tensorflow_text''' , lowerCAmelCase__ )
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCAmelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def test_create_dummy_object( self ):
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , '''\nCONSTANT = None\n''' )
__SCREAMING_SNAKE_CASE : List[str] = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
lowerCAmelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__SCREAMING_SNAKE_CASE : int = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def test_create_dummy_files( self ):
__SCREAMING_SNAKE_CASE : str = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , lowerCAmelCase__ )
| 696 | 1 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
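# Gaussian blur via im2col: build a k x k Gaussian kernel (of the form
# G(x, y) proportional to exp(-(x^2 + y^2) / (2 * sigma^2))), unroll every k x k
# window of the image into a row, and apply the kernel as one matrix product.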
def gen_gaussian_kernel( k_size , sigma ):
center = k_size // 2
x , y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
return g
def gaussian_filter( image , k_size , sigma ):
height , width = image.shape[0], image.shape[1]
# dst image height and width
dst_height = height - k_size + 1
dst_width = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
image_array = zeros((dst_height * dst_width, k_size * k_size) )
row = 0
for i, j in product(range(dst_height ) , range(dst_width ) ):
window = ravel(image[i : i + k_size, j : j + k_size] )
image_array[row, :] = window
row += 1
# turn the kernel into shape(k*k, 1)
gaussian_kernel = gen_gaussian_kernel(k_size , sigma )
filter_array = ravel(gaussian_kernel )
# reshape and get the dst image
dst = dot(image_array , filter_array ).reshape(dst_height , dst_width ).astype(uint8 )
return dst
if __name__ == "__main__":
# read original image
img = imread(r'../image_data/lena.jpg')
# turn image in gray scale value
gray = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussian3x3)
imshow('gaussian filter with 5x5 mask', gaussian5x5)
waitKey()
| 696 |
import math
from numpy import inf
from scipy.integrate import quad
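# Numerically evaluates the gamma function, Gamma(z) = integral from 0 to infinity
# of x**(z - 1) * exp(-x) dx, using scipy's adaptive quadrature.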
def gamma( num ):
if num <= 0:
raise ValueError('''math domain error''' )
return quad(integrand , 0 , inf , args=(num) )[0]
def integrand( x , z ):
return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 696 | 1 |
import argparse
from collections import defaultdict
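# Utility script: given a file of corrected expected-output lines
# ("file;class;test;correct_line" per line), rewrite the matching assertion
# lines inside the named test methods in place.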
def overwrite_file( file , class_name , test_name , correct_line , done_test ):
_id = F'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(file , '''r''' ) as f:
lines = f.readlines()
class_regex = F'''class {class_name}('''
func_regex = F'''{4 * ' '}def {test_name}('''
line_begin_regex = F'''{8 * ' '}{correct_line.split()[0]}'''
another_line_begin_regex = F'''{16 * ' '}{correct_line.split()[0]}'''
in_class = False
in_func = False
in_line = False
insert_line = False
count = 0
spaces = 0
new_lines = []
for line in lines:
if line.startswith(class_regex ):
in_class = True
elif in_class and line.startswith(func_regex ):
in_func = True
elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
spaces = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
in_line = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
insert_line = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F'''{spaces * ' '}{correct_line}''' )
in_class = in_func = in_line = insert_line = False
else:
new_lines.append(line )
with open(file , '''w''' ) as f:
for line in new_lines:
f.write(line )
def main( correct , fail=None ):
if fail is not None:
with open(fail , '''r''' ) as f:
test_failures = {l.strip() for l in f.readlines()}
else:
test_failures = None
with open(correct , '''r''' ) as f:
correct_lines = f.readlines()
done_tests = defaultdict(int )
for line in correct_lines:
file , class_name , test_name , correct_line = line.split(''';''' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
__lowerCAmelCase : List[Any] =argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
__lowerCAmelCase : List[str] =parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 696 |
def equated_monthly_installments( principal , rate_per_annum , years_to_repay ):
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
rate_per_month = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
number_of_payments = years_to_repay * 12
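# Standard EMI formula: EMI = P * r * (1 + r)**n / ((1 + r)**n - 1),
# with monthly rate r and n monthly payments.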
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
'''simple docstring'''
def __init__( self , parent , batch_size=13 , num_channels=3 , is_training=True , use_labels=True , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , image_size=224 , num_labels=1_000 , layer_depths=[3, 3, 6, 4] , embed_dims=[48, 56, 112, 220] , ):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.num_labels = num_labels
self.image_size = image_size
self.layer_depths = layer_depths
self.embed_dims = embed_dims
def prepare_config_and_inputs( self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=True , layer_scale_init_value=1E-5 , )
def create_and_check_model( self , config , pixel_values , labels ):
model = SwiftFormerModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def create_and_check_for_image_classification( self , config , pixel_values , labels ):
config.num_labels = self.num_labels
model = SwiftFormerForImageClassification(config )
model.to(torch_device )
model.eval()
result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
model = SwiftFormerForImageClassification(config )
model.to(torch_device )
model.eval()
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
result = model(pixel_values )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def prepare_config_and_inputs_for_common( self ):
(config, pixel_values, labels) = self.prepare_config_and_inputs()
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SwiftFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
def setUp( self ):
self.model_tester = SwiftFormerModelTester(self )
self.config_tester = ConfigTester(
self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def test_config( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def __magic_name__( self :str ) -> Optional[int]:
pass
def __magic_name__( self :Dict ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Optional[int] = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __magic_name__( self :str ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_image_classification( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = SwiftFormerModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
pass
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
def check_hidden_states_output(lowerCAmelCase__ :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict ):
__SCREAMING_SNAKE_CASE : List[str] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[int] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : int = 8
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Dict = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> Any:
def _config_zero_init(lowerCAmelCase__ :List[str] ):
__SCREAMING_SNAKE_CASE : List[str] = copy.deepcopy(lowerCAmelCase__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase__ , lowerCAmelCase__ , 1E-1_0 )
if isinstance(getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : int = _config_zero_init(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return configs_no_init
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : int = _config_zero_init(lowerCAmelCase__ )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : List[Any] = model_class(config=lowerCAmelCase__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __magic_name__( self :Dict ) -> Dict:
pass
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__( self :Optional[Any] ) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[str] = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = self.default_image_processor
__SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
__SCREAMING_SNAKE_CASE : int = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Dict = model(**lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
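# --- Editor's sketch (illustration, not part of the original test file): the
# hidden-state shape assertion above, factored into a standalone helper. The
# layout (stride-4 patch stem, spatial size halved every two stages) is read off
# the assertion; the helper name and example values are hypothetical.
def expected_swiftformer_stage_shape(batch_size, embed_dims, image_size, i):
    side = (image_size // 4) // 2 ** (i // 2)
    return (batch_size, embed_dims[i // 2], side, side)

# e.g. expected_swiftformer_stage_shape(13, [16, 16, 32, 32], 224, 4) == (13, 32, 14, 14)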
| 696 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = prime_factors(lowercase__ )
if is_square_free(lowercase__ ):
return -1 if len(lowercase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
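# --- Editor's sketch (illustration, not part of the original module): a
# self-contained Möbius reference for cross-checking the function above.
# mobius(n) is 1 for a square-free product of an even number of primes, -1 for
# an odd number, and 0 when any prime factor appears squared.
def _mobius_reference(n: int) -> int:
    result, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            n //= p
            if n % p == 0:  # squared prime factor => mu(n) = 0
                return 0
            result = -result  # one more distinct prime flips the sign
        p += 1
    return -result if n > 1 else result  # a leftover prime > sqrt(n) also flips it

# e.g. _mobius_reference(6) == 1, _mobius_reference(12) == 0, _mobius_reference(30) == -1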
| 696 | 1 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = 10**-10 ):
__SCREAMING_SNAKE_CASE : Tuple = a
while True:
__SCREAMING_SNAKE_CASE : Optional[Any] = Decimal(lowercase__ ) - (
Decimal(eval(lowercase__ ) ) / Decimal(eval(str(diff(lowercase__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(lowercase__ ) ) < precision: # noqa: S307
return float(lowercase__ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
    # Find the root of log(x) - 1 = 0, i.e. Euler's number e
print(f"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
| 696 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase : int ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __magic_name__( cls :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :List[str] ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :Dict ) -> Optional[int]:
CustomConfig.register_for_auto_class()
__SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int
__SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float
__SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool
__SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = PretrainedConfig()
__SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
__SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
            ''' `test_configuration_common.config_common_kwargs`; pick another value for them:'''
f''' {', '.join(lowerCAmelCase__ )}.''' )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
with self.assertRaises(lowerCAmelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
__SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock()
__SCREAMING_SNAKE_CASE : List[Any] = 500
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError
__SCREAMING_SNAKE_CASE : str = {}
# Download this model to make sure it's in the cache.
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head:
__SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        # This check ensures we did call the fake head request
mock_head.assert_called()
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
__SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __magic_name__( self :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json''']
__SCREAMING_SNAKE_CASE : Tuple = 768
configuration.save_pretrained(lowerCAmelCase__ )
shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) )
__SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def __magic_name__( self :List[str] ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
__SCREAMING_SNAKE_CASE : int = '''v4.0.0'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase__ , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
__SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0'''
__SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(old_configuration.hidden_size , 768 )
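# --- Editor's sketch (assumption, not the library's actual implementation): the
# version gating exercised by the two tests above, reduced to its core — pick the
# newest "config.X.Y.Z.json" whose version is <= the running Transformers version,
# falling back to the plain "config.json".
from packaging import version

def pick_config_file(available_files, current_version):
    best_name, best_version = "config.json", None
    for name in available_files:
        v = version.parse(name[len("config.") : -len(".json")])
        if v <= version.parse(current_version) and (best_version is None or v > best_version):
            best_name, best_version = name, v
    return best_name

# pick_config_file(["config.4.0.0.json"], "4.35.0")  -> "config.4.0.0.json"
# pick_config_file(["config.42.0.0.json"], "4.35.0") -> "config.json"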
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : int ={
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =[
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : List[str] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
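# --- Editor's sketch (assumption, not the real _LazyModule): the core of the
# lazy-import pattern used above — a module object that only imports a submodule
# the first time one of its exported attributes is accessed.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported name to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)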
| 696 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Any ={
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 | 1 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE__ : Any = '''AutoImageProcessor'''
SCREAMING_SNAKE_CASE__ : Any = '''AutoTokenizer'''
def __init__( self :List[Any] , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :Dict=None , **lowerCAmelCase__ :Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : int = kwargs.pop('''feature_extractor''' )
__SCREAMING_SNAKE_CASE : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = self.image_processor
__SCREAMING_SNAKE_CASE : int = False
def __call__( self :int , *lowerCAmelCase__ :List[Any] , **lowerCAmelCase__ :Dict ) -> str:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop('''images''' , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('''text''' , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
__SCREAMING_SNAKE_CASE : str = args[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
__SCREAMING_SNAKE_CASE : Any = self.image_processor(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None:
__SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(lowerCAmelCase__ , **lowerCAmelCase__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
__SCREAMING_SNAKE_CASE : List[str] = encodings['''input_ids''']
return inputs
def __magic_name__( self :Optional[int] , *lowerCAmelCase__ :Any , **lowerCAmelCase__ :int ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :int , *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :Any ) -> Optional[int]:
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@contextmanager
def __magic_name__( self :str ) -> Dict:
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
__SCREAMING_SNAKE_CASE : Optional[int] = True
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer
yield
__SCREAMING_SNAKE_CASE : Dict = self.image_processor
__SCREAMING_SNAKE_CASE : Tuple = False
def __magic_name__( self :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[str]=None ) -> List[Any]:
if added_vocab is None:
__SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.get_added_vocab()
__SCREAMING_SNAKE_CASE : List[Any] = {}
while tokens:
__SCREAMING_SNAKE_CASE : List[Any] = re.search(r'''<s_(.*?)>''' , lowerCAmelCase__ , re.IGNORECASE )
if start_token is None:
break
__SCREAMING_SNAKE_CASE : str = start_token.group(1 )
__SCREAMING_SNAKE_CASE : Optional[Any] = re.search(rf'''</s_{key}>''' , lowerCAmelCase__ , re.IGNORECASE )
__SCREAMING_SNAKE_CASE : List[str] = start_token.group()
if end_token is None:
__SCREAMING_SNAKE_CASE : Tuple = tokens.replace(lowerCAmelCase__ , '''''' )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = end_token.group()
__SCREAMING_SNAKE_CASE : Dict = re.escape(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = re.escape(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''' , lowerCAmelCase__ , re.IGNORECASE )
if content is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__SCREAMING_SNAKE_CASE : int = self.tokenajson(lowerCAmelCase__ , is_inner_value=lowerCAmelCase__ , added_vocab=lowerCAmelCase__ )
if value:
if len(lowerCAmelCase__ ) == 1:
__SCREAMING_SNAKE_CASE : Union[str, Any] = value[0]
__SCREAMING_SNAKE_CASE : List[Any] = value
else: # leaf nodes
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
for leaf in content.split(r'''<sep/>''' ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2] # for categorical special tokens
                            output[key].append(leaf )
if len(output[key] ) == 1:
__SCREAMING_SNAKE_CASE : Union[str, Any] = output[key][0]
__SCREAMING_SNAKE_CASE : Optional[Any] = tokens[tokens.find(lowerCAmelCase__ ) + len(lowerCAmelCase__ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=lowerCAmelCase__ , added_vocab=lowerCAmelCase__ )
if len(lowerCAmelCase__ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase__ , )
return self.image_processor_class
@property
def __magic_name__( self :Union[str, Any] ) -> Any:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase__ , )
return self.image_processor
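# --- Editor's note (illustration, not part of the original file): a worked
# example of what the token-to-JSON parser above produces. Tag names and values
# are hypothetical.
#
#   "<s_menu><s_name>latte</s_name><s_price>5</s_price></s_menu>"
#     -> {"menu": {"name": "latte", "price": "5"}}
#
# Leaf values joined with "<sep/>" become lists:
#
#   "<s_tags>hot<sep/>decaf</s_tags>" -> {"tags": ["hot", "decaf"]}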
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Optional[int] = d_kv
__SCREAMING_SNAKE_CASE : Tuple = d_ff
__SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers
        # This tells us after how many encoder layers we have to insert a sparse layer.
if self.num_sparse_encoder_layers > 0:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers
else:
__SCREAMING_SNAKE_CASE : Tuple = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us after how many decoder layers we have to insert a sparse layer.
if self.num_sparse_decoder_layers > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers
__SCREAMING_SNAKE_CASE : List[Any] = num_heads
__SCREAMING_SNAKE_CASE : List[Any] = num_experts
__SCREAMING_SNAKE_CASE : Tuple = expert_capacity
__SCREAMING_SNAKE_CASE : List[Any] = router_bias
__SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
__SCREAMING_SNAKE_CASE : List[Any] = router_dtype
__SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens
__SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance
__SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate
__SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : int = initializer_factor
__SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj
__SCREAMING_SNAKE_CASE : Any = use_cache
__SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs
__SCREAMING_SNAKE_CASE : int = router_z_loss_coef
__SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef
__SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
__SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1]
__SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated'''
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new'''
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
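# --- Editor's sketch (assumption: the placement offset shown is illustrative,
# not necessarily the library's exact convention): a sparse step derived above
# means one in every `sparse_step` blocks is a sparse (MoE) block.
def sparse_layer_indices(num_layers, sparse_step):
    # e.g. num_layers=12, sparse_step=4 -> blocks 3, 7, 11 are sparse (0-indexed)
    return [i for i in range(num_layers) if (i + 1) % sparse_step == 0]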
| 696 | 1 |
import os
import sys
import unittest
__lowerCAmelCase : List[Any] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowerCAmelCase : Optional[Any] =os.path.join(git_repo_path, 'src', 'transformers')
__lowerCAmelCase : Optional[Any] ='\n{0} = None\n'
__lowerCAmelCase : Tuple ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
__lowerCAmelCase : Dict ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tokenizers''' )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tensorflow_text''' )
__SCREAMING_SNAKE_CASE : Tuple = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers''' )
__SCREAMING_SNAKE_CASE : Any = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tensorflow_text''' )
__SCREAMING_SNAKE_CASE : List[str] = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers_and_vision''' )
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = read_init()
        # We don't assert on the exact list of keys to allow for the smooth growth of backend-specific objects
self.assertIn('''torch''' , lowerCAmelCase__ )
self.assertIn('''tensorflow_text''' , lowerCAmelCase__ )
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCAmelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , '''\nCONSTANT = None\n''' )
__SCREAMING_SNAKE_CASE : List[str] = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
lowerCAmelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__SCREAMING_SNAKE_CASE : int = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , lowerCAmelCase__ )
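# --- Editor's sketch (assumption, not the actual utility): the kind of regex
# matching that find_backend, tested above, performs on "if not is_x_available():"
# lines — multiple backends are joined with "_and_".
import re

_re_backend_sketch = re.compile(r"is_([a-z_]*)_available\(\)")

def find_backend_sketch(line):
    if "if not" not in line:
        return None
    backends = _re_backend_sketch.findall(line)
    return "_and_".join(backends) if backends else None

# find_backend_sketch("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
# -> "sentencepiece_and_tokenizers"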
| 696 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 696 | 1 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : List[str] =['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :Dict=1 ) -> str:
__SCREAMING_SNAKE_CASE : str = tokenizer
__SCREAMING_SNAKE_CASE : List[Any] = dataset
__SCREAMING_SNAKE_CASE : str = len(lowerCAmelCase__ ) if n_tasks is None else n_tasks
__SCREAMING_SNAKE_CASE : List[Any] = n_copies
def __iter__( self :Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[int] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
__SCREAMING_SNAKE_CASE : int = self.tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int ) -> int:
__SCREAMING_SNAKE_CASE : Optional[int] = start_length
__SCREAMING_SNAKE_CASE : Union[str, Any] = eof_strings
__SCREAMING_SNAKE_CASE : str = tokenizer
def __call__( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
__SCREAMING_SNAKE_CASE : Any = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCAmelCase__ )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = re.split('''(%s)''' % '''|'''.join(lowercase__ ) , lowercase__ )
# last string should be ""
return "".join(string_list[:-2] )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=20 , **lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = defaultdict(lowercase__ ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(lowercase__ ) ):
with torch.no_grad():
__SCREAMING_SNAKE_CASE : List[str] = batch['''ids'''].shape[-1]
__SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.unwrap_model(lowercase__ ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=lowercase__ , **lowercase__ )
# each task is generated batch_size times
__SCREAMING_SNAKE_CASE : Tuple = batch['''task_id'''].repeat(lowercase__ )
__SCREAMING_SNAKE_CASE : str = accelerator.pad_across_processes(
lowercase__ , dim=1 , pad_index=tokenizer.pad_token_id )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.gather((generated_tokens, generated_tasks) )
__SCREAMING_SNAKE_CASE : Dict = generated_tokens.cpu().numpy()
__SCREAMING_SNAKE_CASE : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(lowercase__ , lowercase__ ):
gen_token_dict[task].append(lowercase__ )
__SCREAMING_SNAKE_CASE : Any = [[] for _ in range(lowercase__ )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ )
code_gens[task].append(remove_last_block(lowercase__ ) )
return code_gens
def _UpperCamelCase ( ):
# Setup configuration
__SCREAMING_SNAKE_CASE : str = HfArgumentParser(lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
__SCREAMING_SNAKE_CASE : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
__SCREAMING_SNAKE_CASE : Optional[Any] = '''false'''
if args.num_workers is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
__SCREAMING_SNAKE_CASE : Optional[int] = Accelerator()
set_seed(args.seed , device_specific=lowercase__ )
# Load model and tokenizer
__SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained(args.model_ckpt )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.eos_token
__SCREAMING_SNAKE_CASE : Tuple = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
__SCREAMING_SNAKE_CASE : List[str] = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , lowercase__ , lowercase__ )] ),
}
# Load evaluation dataset and metric
__SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset('''openai_humaneval''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = load_metric('''code_eval''' )
__SCREAMING_SNAKE_CASE : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
__SCREAMING_SNAKE_CASE : int = args.n_samples // args.batch_size
__SCREAMING_SNAKE_CASE : int = TokenizedDataset(lowercase__ , human_eval['''test'''] , n_copies=lowercase__ , n_tasks=lowercase__ )
    # do not confuse args.batch_size with the dataloader batch size; it is actually num_return_sequences
__SCREAMING_SNAKE_CASE : Union[str, Any] = DataLoader(lowercase__ , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
__SCREAMING_SNAKE_CASE : str = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = accelerator.prepare(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = complete_code(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , n_tasks=lowercase__ , batch_size=args.batch_size , **lowercase__ , )
if accelerator.is_main_process:
__SCREAMING_SNAKE_CASE : str = []
for task in tqdm(range(lowercase__ ) ):
__SCREAMING_SNAKE_CASE : List[str] = human_eval['''test'''][task]['''test''']
__SCREAMING_SNAKE_CASE : Any = F'''check({human_eval['test'][task]['entry_point']})'''
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = code_eval_metric.compute(
references=lowercase__ , predictions=lowercase__ , num_workers=args.num_workers )
print(F'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(lowercase__ , lowercase__ )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
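# --- Editor's sketch (assumption about the metric's internals, but the formula
# is the standard one from Chen et al., 2021): the unbiased pass@k estimator that
# "code_eval" computes from n generated samples with c correct ones:
# pass@k = 1 - C(n-c, k) / C(n, k), evaluated stably as a running product.
import numpy as np

def estimate_pass_at_k(n, c, k):
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

# estimate_pass_at_k(n=200, c=10, k=1) -> 0.05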
| 696 |
from datetime import datetime
import requests
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
__SCREAMING_SNAKE_CASE : Tuple = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(lowercase__ ).content
if __name__ == "__main__":
__lowerCAmelCase : int =input('Enter Video/IGTV url: ').strip()
__lowerCAmelCase : Union[str, Any] =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 696 | 1 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Any , lowerCAmelCase__ :UNetaDModel , lowerCAmelCase__ :UNetaDModel , lowerCAmelCase__ :DDPMScheduler , lowerCAmelCase__ :Union[str, Any] , ) -> str:
super().__init__()
__SCREAMING_SNAKE_CASE : Union[str, Any] = value_function
__SCREAMING_SNAKE_CASE : Any = unet
__SCREAMING_SNAKE_CASE : int = scheduler
__SCREAMING_SNAKE_CASE : Union[str, Any] = env
__SCREAMING_SNAKE_CASE : Tuple = env.get_dataset()
__SCREAMING_SNAKE_CASE : Optional[int] = {}
for key in self.data.keys():
try:
__SCREAMING_SNAKE_CASE : int = self.data[key].mean()
except: # noqa: E722
pass
__SCREAMING_SNAKE_CASE : int = {}
for key in self.data.keys():
try:
__SCREAMING_SNAKE_CASE : int = self.data[key].std()
except: # noqa: E722
pass
__SCREAMING_SNAKE_CASE : Optional[int] = env.observation_space.shape[0]
__SCREAMING_SNAKE_CASE : Tuple = env.action_space.shape[0]
def __magic_name__( self :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> List[Any]:
return (x_in - self.means[key]) / self.stds[key]
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int ) -> List[str]:
return x_in * self.stds[key] + self.means[key]
def __magic_name__( self :List[str] , lowerCAmelCase__ :Any ) -> Tuple:
if type(lowerCAmelCase__ ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCAmelCase__ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCAmelCase__ , device=self.unet.device )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any ) -> Any:
for key, val in cond.items():
__SCREAMING_SNAKE_CASE : List[Any] = val.clone()
return x_in
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict ) -> Any:
__SCREAMING_SNAKE_CASE : str = x.shape[0]
__SCREAMING_SNAKE_CASE : int = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
__SCREAMING_SNAKE_CASE : int = torch.full((batch_size,) , lowerCAmelCase__ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCAmelCase__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
__SCREAMING_SNAKE_CASE : Optional[Any] = self.value_function(x.permute(0 , 2 , 1 ) , lowerCAmelCase__ ).sample
__SCREAMING_SNAKE_CASE : Any = torch.autograd.grad([y.sum()] , [x] )[0]
__SCREAMING_SNAKE_CASE : int = self.scheduler._get_variance(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = torch.exp(0.5 * posterior_variance )
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_std * grad
__SCREAMING_SNAKE_CASE : List[str] = 0
__SCREAMING_SNAKE_CASE : Tuple = x.detach()
__SCREAMING_SNAKE_CASE : Dict = x + scale * grad
__SCREAMING_SNAKE_CASE : List[Any] = self.reset_xa(lowerCAmelCase__ , lowerCAmelCase__ , self.action_dim )
__SCREAMING_SNAKE_CASE : Tuple = self.unet(x.permute(0 , 2 , 1 ) , lowerCAmelCase__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
__SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , predict_epsilon=lowerCAmelCase__ )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
__SCREAMING_SNAKE_CASE : Optional[int] = self.reset_xa(lowerCAmelCase__ , lowerCAmelCase__ , self.action_dim )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.to_torch(lowerCAmelCase__ )
return x, y
def __call__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :Dict=32 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :int=0.1 ) -> List[str]:
# normalize the observations and create batch dimension
__SCREAMING_SNAKE_CASE : int = self.normalize(lowerCAmelCase__ , '''observations''' )
__SCREAMING_SNAKE_CASE : Dict = obs[None].repeat(lowerCAmelCase__ , axis=0 )
__SCREAMING_SNAKE_CASE : Optional[Any] = {0: self.to_torch(lowerCAmelCase__ )}
__SCREAMING_SNAKE_CASE : Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
__SCREAMING_SNAKE_CASE : int = randn_tensor(lowerCAmelCase__ , device=self.unet.device )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.reset_xa(lowerCAmelCase__ , lowerCAmelCase__ , self.action_dim )
__SCREAMING_SNAKE_CASE : Optional[int] = self.to_torch(lowerCAmelCase__ )
# run the diffusion process
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = self.run_diffusion(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# sort output trajectories by value
__SCREAMING_SNAKE_CASE : int = y.argsort(0 , descending=lowerCAmelCase__ ).squeeze()
__SCREAMING_SNAKE_CASE : List[Any] = x[sorted_idx]
__SCREAMING_SNAKE_CASE : str = sorted_values[:, :, : self.action_dim]
__SCREAMING_SNAKE_CASE : Optional[int] = actions.detach().cpu().numpy()
__SCREAMING_SNAKE_CASE : int = self.de_normalize(lowerCAmelCase__ , key='''actions''' )
# select the action with the highest value
if y is not None:
__SCREAMING_SNAKE_CASE : str = 0
else:
# if we didn't run value guiding, select a random action
__SCREAMING_SNAKE_CASE : int = np.random.randint(0 , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
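# --- Editor's note (illustration, not executable against the class above): the
# value-guidance step inside run_diffusion, in isolation — gradient ascent on a
# learned value function, then re-imposing the state conditioning. Roughly:
#
#   y = value_function(x.permute(0, 2, 1), t).sample       # scalar value per trajectory
#   grad = torch.autograd.grad([y.sum()], [x])[0]          # d(value)/d(sample)
#   x = x.detach() + scale * posterior_std * grad          # nudge samples toward high value
#   x[:, 0, action_dim:] = current_observation             # keep the start state fixed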
| 696 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__( self :int ) -> Optional[int]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' )
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = '''french fries'''
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
__SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5
__SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
        print(",".join(str(round(x, 4)) for x in image_slice.flatten().tolist()))
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae''']
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
__SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
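# Note on the test above: it round-trips the input image through the VAE
# (`vae.encode(...).latent_dist.mode()`) and asserts that feeding those latents
# to the pipeline matches feeding the raw image, i.e. image preprocessing and
# latent encoding stay consistent.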
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg'''
        )
        inputs = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Dict ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = self.get_inputs()
__SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : str = self.get_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Tuple:
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2

        callback_fn.has_been_called = False
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __magic_name__( self :List[str] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __magic_name__( self :int ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
        inputs['''image'''] = inputs['''image'''].resize((504, 504))
__SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix'''
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 696 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 696 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function output."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
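# Illustrative note (not part of the original module): with the default
# "cosine" transform the betas start near zero and grow toward `max_beta`:
#
#     >>> betas = betas_for_alpha_bar(10)
#     >>> bool(betas[0] < betas[-1] <= 0.999)
#     True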
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """
    DDIMInverseScheduler is the reverse scheduler of [`DDIMScheduler`].
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ) -> None:
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Interchangeability hook for schedulers that need to scale the denoising model input; a no-op here."""
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.num_train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_steps is a power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise, also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
    def __len__(self) -> int:
        return self.config.num_train_timesteps
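# A hedged usage sketch (not part of the original file): `unet` below is a
# stand-in for any callable mapping (sample, timestep) -> predicted noise; a
# real DDIM inversion would use a trained UNet conditioned on a prompt.
if __name__ == "__main__":
    scheduler = DDIMInverseScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(50)
    latents = torch.randn(1, 4, 64, 64)
    unet = lambda sample, t: torch.randn_like(sample)  # placeholder noise model
    for t in scheduler.timesteps:
        noise_pred = unet(latents, t)
        latents = scheduler.step(noise_pred, t, latents).prev_sample
    print(latents.shape)  # torch.Size([1, 4, 64, 64])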
| 696 | 1 |
def _UpperCamelCase(number: int) -> bool:
    return number & 1 == 0
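# `number & 1` isolates the least-significant bit, which is 0 exactly when the
# integer is even, so the expression above is equivalent to `number % 2 == 0`;
# e.g. _UpperCamelCase(4) is True and _UpperCamelCase(7) is False.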
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 696 | 1 |
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
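# Illustrative behaviour of the helpers above (the wrapper names to_pascal_case
# / to_camel_case / to_snake_case / to_kebab_case were reconstructed from the
# separators and casing logic, so treat them as descriptive rather than canonical):
#
#     to_pascal_case("hello world")        -> "HelloWorld"
#     to_camel_case("hello world")         -> "helloWorld"
#     to_snake_case("hello world", False)  -> "hello_world"
#     to_kebab_case("hello world", True)   -> "HELLO-WORLD"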
if __name__ == "__main__":
__import__('doctest').testmod()
| 696 |
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
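# The recurrence above appears to walk the known family of almost-equilateral
# Heronian triangles (sides (a, a, a +/- 1) with integral area), whose
# perimeters are 16, 50, 196, 722, ... — the usual closed-form approach to
# Project Euler problem 94. For example, solution(100) == 16 + 50 == 66.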
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 | 1 |
import os
def solution():
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
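# Scoring: each name contributes (alphabetical rank) * (sum of letter values,
# A=1..Z=26). E.g. COLIN = 3 + 15 + 12 + 9 + 14 = 53; at position 938 in the
# sorted list it scores 938 * 53 = 49714 (the worked example from Project
# Euler problem 22).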
if __name__ == "__main__":
print(solution())
| 696 |
def abbr(a: str, b: str) -> bool:
    """
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
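# Reading of the DP above: dp[i][j] is True when the first i characters of `a`
# can be abbreviated to the first j characters of `b`. A character of `a` may
# either be capitalized to match b[j] (advance both indices) or, if it is
# lowercase, deleted (advance i only); uppercase characters must match exactly.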
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
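# A hedged usage sketch (not part of the packaged module itself): this builder
# is what `load_dataset` dispatches to for pickled pandas DataFrames, e.g.
#
#     from datasets import load_dataset
#     ds = load_dataset("pandas", data_files="data.pkl")  # "data.pkl" is illustrative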
| 696 |
from scipy.stats import pearsonr
import datasets
_DESCRIPTION ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION ='\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but also returning the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
_CITATION ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    """Pearson correlation coefficient metric."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 696 | 1 |
def solution(n: int = 4000000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
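# Every third Fibonacci number is even (the parity pattern is odd, odd, even),
# so an equivalent optimization iterates only even terms via
# E(k) = 4 * E(k - 1) + E(k - 2). For example, solution(100) == 2 + 8 + 34 == 44.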
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : int ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : int ={
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
__lowerCAmelCase : Optional[int] ={'mobilebert-uncased': 5_1_2}
__lowerCAmelCase : Union[str, Any] ={}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[Any] = MobileBertTokenizer
def __init__( self :Tuple , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]="[UNK]" , lowerCAmelCase__ :List[Any]="[SEP]" , lowerCAmelCase__ :List[Any]="[PAD]" , lowerCAmelCase__ :List[Any]="[CLS]" , lowerCAmelCase__ :Any="[MASK]" , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :List[str] , ) -> Optional[Any]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars
):
__SCREAMING_SNAKE_CASE : int = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case
__SCREAMING_SNAKE_CASE : str = strip_accents
__SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars
__SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = do_lower_case
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any]=None ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
__SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
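# Typical usage of the fast tokenizer defined above (in the released library it
# is exposed as `MobileBertTokenizerFast`):
#
#     tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#     tok("hello world")["input_ids"]  # -> [101, ..., 102] with [CLS]/[SEP] added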
| 696 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCAmelCase : Tuple =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__lowerCAmelCase : int =parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
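# Example invocation (the script filename shown is illustrative):
#
#     python convert_dino_to_pytorch.py --model_name dino_vitb16 \
#         --pytorch_dump_folder_path ./dino_vitb16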
| 696 |
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest
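# As the comment inside largest_product notes, the vertical pass indexes
# grid[j][i] while the horizontal and diagonal passes index grid[i][j], so the
# function is only correct for square (n x n) grids such as the 20x20 grid of
# Project Euler problem 11.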
def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

        grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
| 696 | 1 |
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 696 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) )
class _lowercase :
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier
__SCREAMING_SNAKE_CASE : Dict = min_depth
__SCREAMING_SNAKE_CASE : List[str] = tf_padding
__SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier )
__SCREAMING_SNAKE_CASE : List[str] = output_stride
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = scope
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __magic_name__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __magic_name__( self :List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __magic_name__( self :Any ) -> Dict:
pass
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : Optional[int] = 26
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :List[str] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor
__SCREAMING_SNAKE_CASE : int = prepare_img()
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 696 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class _lowercase :
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCAmelCase__ :List[str] , ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Dict = parent
__SCREAMING_SNAKE_CASE : Optional[int] = 13
__SCREAMING_SNAKE_CASE : Union[str, Any] = 7
__SCREAMING_SNAKE_CASE : Tuple = True
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : List[Any] = True
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : str = True
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : List[str] = 2
__SCREAMING_SNAKE_CASE : int = 99
__SCREAMING_SNAKE_CASE : str = 0
__SCREAMING_SNAKE_CASE : Any = 32
__SCREAMING_SNAKE_CASE : Optional[int] = 2
__SCREAMING_SNAKE_CASE : Optional[int] = 4
__SCREAMING_SNAKE_CASE : Tuple = 0.1
__SCREAMING_SNAKE_CASE : Optional[Any] = 0.1
__SCREAMING_SNAKE_CASE : Tuple = 512
__SCREAMING_SNAKE_CASE : Tuple = 16
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : List[str] = 0.02
__SCREAMING_SNAKE_CASE : Tuple = 3
__SCREAMING_SNAKE_CASE : str = 4
__SCREAMING_SNAKE_CASE : List[str] = '''last'''
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : List[Any] = None
__SCREAMING_SNAKE_CASE : str = 0
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_lengths:
__SCREAMING_SNAKE_CASE : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__SCREAMING_SNAKE_CASE : List[Any] = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Optional[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] , ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : int = TFFlaubertModel(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = [input_ids, input_mask]
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] , ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[str] = TFFlaubertWithLMHeadModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Union[str, Any] , ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = TFFlaubertForQuestionAnsweringSimple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = {'''input_ids''': input_ids, '''lengths''': input_lengths}
__SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__( self :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Any , ) -> str:
__SCREAMING_SNAKE_CASE : Optional[Any] = TFFlaubertForSequenceClassification(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths}
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__( self :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = TFFlaubertForTokenClassification(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Tuple , ) -> int:
__SCREAMING_SNAKE_CASE : Tuple = self.num_choices
__SCREAMING_SNAKE_CASE : List[str] = TFFlaubertForMultipleChoice(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE : Any = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE : str = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE : str = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__( self :List[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ,
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ,
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ,
) : List[Any] = config_and_inputs
__SCREAMING_SNAKE_CASE : List[Any] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE__ : int = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE__ : List[Any] = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : Optional[int] = False
def __magic_name__( self :int , lowerCAmelCase__ :int , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __magic_name__( self :Optional[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Dict = TFFlaubertModelTester(self )
__SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , emb_dim=37 )
def __magic_name__( self :Dict ) -> int:
self.config_tester.run_common_tests()
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase__ )
def __magic_name__( self :str ) -> Any:
__SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase__ )
def __magic_name__( self :int ) -> str:
__SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCAmelCase__ )
def __magic_name__( self :str ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCAmelCase__ )
@slow
def __magic_name__( self :Union[str, Any] ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : List[str] = TFFlaubertModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__( self :Optional[int] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.convert_to_tensor(
[[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : Tuple = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , lowerCAmelCase__ )
# compare the actual values for a slice.
__SCREAMING_SNAKE_CASE : str = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
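# Illustrative sketch (an addition, not part of the test file above): the multiple-choice
# test duplicates each (batch, seq_len) input across a new choice axis with
# tf.expand_dims + tf.tile. A minimal, self-contained demonstration of that reshaping:
import tensorflow as tf
def tile_for_multiple_choice(input_ids: tf.Tensor, num_choices: int) -> tf.Tensor:
    # (batch, seq_len) -> (batch, 1, seq_len) -> (batch, num_choices, seq_len)
    return tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))
# Example: a (2, 4) batch becomes (2, 3, 4) when tiled for 3 answer choices.
assert tile_for_multiple_choice(tf.zeros((2, 4), dtype=tf.int32), 3).shape == (2, 3, 4)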
| 696 |
import os
from datetime import datetime as dt
from github import Github
__lowerCAmelCase : List[Any] =[
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = Github(os.environ['''GITHUB_TOKEN'''] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = g.get_repo('''huggingface/diffusers''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
__SCREAMING_SNAKE_CASE : Optional[int] = sorted(issue.get_comments() , key=lambda lowercase__ : i.created_at , reverse=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = comments[0] if len(lowercase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
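# Hedged sketch (not part of the script above): the close/notify thresholds factored into
# a pure function so they can be unit-tested without the GitHub API. Parameter names here
# are hypothetical; the reopen branch for non-bot comments on "stale" issues is omitted.
def stale_action(last_commenter, days_inactive, days_old, labels):
    if any(label.lower() in LABELS_TO_EXEMPT for label in labels):
        return None  # exempt issues are never touched
    if last_commenter == "github-actions[bot]" and days_inactive > 7 and days_old >= 30:
        return "close"   # 7+ days of silence after the Stalebot notification
    if days_inactive > 23 and days_old >= 30:
        return "notify"  # post the Stalebot comment and add the "stale" label
    return None
# Example: last touched by the bot 8 days ago, opened 31 days ago -> closed.
assert stale_action("github-actions[bot]", 8, 31, []) == "close"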
| 696 | 1 |
def _UpperCamelCase ( lowercase__ , lowercase__ ):
# "extended trapezoidal rule"
# int(f) ~= dx/2 * (f1 + 2*f2 + ... + 2*f(n-1) + fn)
__SCREAMING_SNAKE_CASE : List[Any] = (boundary[1] - boundary[0]) / steps
__SCREAMING_SNAKE_CASE : Optional[int] = boundary[0]
__SCREAMING_SNAKE_CASE : Optional[Any] = boundary[1]
__SCREAMING_SNAKE_CASE : Any = make_points(lowercase__ , lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = 0.0
y += (h / 2.0) * f(lowercase__ )
for i in x_i:
# print(i)
y += h * f(lowercase__ )
y += (h / 2.0) * f(lowercase__ )
return y
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = a + h
while x < (b - h):
yield x
__SCREAMING_SNAKE_CASE : List[Any] = x + h
def _UpperCamelCase ( lowercase__ ): # enter your function here
__SCREAMING_SNAKE_CASE : Optional[Any] = (x - 0) * (x - 0)
return y
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : str = 0.0 # Lower bound of integration
__SCREAMING_SNAKE_CASE : Dict = 1.0 # Upper bound of integration
__SCREAMING_SNAKE_CASE : Optional[int] = 10.0 # define number of steps or resolution
__SCREAMING_SNAKE_CASE : Optional[int] = [a, b] # define boundary of integration
__SCREAMING_SNAKE_CASE : Any = method_a(lowercase__ , lowercase__ )
print(F'''y = {y}''' )
if __name__ == "__main__":
main()
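# Readability sketch of the same composite trapezoidal rule with descriptive names. Note
# that make_points above uses a strict `x < (b - h)` comparison, which can drop the last
# interior point; this restatement weights every interior point once, as the rule intends.
def trapezoidal(f, a: float, b: float, steps: int) -> float:
    h = (b - a) / steps
    total = (f(a) + f(b)) / 2.0
    for k in range(1, steps):  # interior points carry full weight
        total += f(a + k * h)
    return h * total
# f(x) = x**2 on [0, 1] with 10 steps gives 0.335, close to the exact value 1/3.
assert abs(trapezoidal(lambda x: x * x, 0.0, 1.0, 10) - 0.335) < 1e-12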
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = '''canine'''
def __init__( self :Any , lowerCAmelCase__ :List[Any]=768 , lowerCAmelCase__ :Any=12 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=3_072 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :int=16_384 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :List[Any]=0.02 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :List[Any]=0xe000 , lowerCAmelCase__ :List[str]=0xe001 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :Union[str, Any]=8 , lowerCAmelCase__ :Optional[int]=16_384 , lowerCAmelCase__ :Any=128 , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
__SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = initializer_range
__SCREAMING_SNAKE_CASE : int = type_vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
# Character config:
__SCREAMING_SNAKE_CASE : Tuple = downsampling_rate
__SCREAMING_SNAKE_CASE : Optional[Any] = upsampling_kernel_size
__SCREAMING_SNAKE_CASE : Any = num_hash_functions
__SCREAMING_SNAKE_CASE : Optional[int] = num_hash_buckets
__SCREAMING_SNAKE_CASE : List[str] = local_transformer_stride
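# Minimal usage sketch (hypothetical model type, mirroring the pattern above): a
# PretrainedConfig subclass stores its keyword arguments as attributes and forwards the
# special token ids to the parent, which also provides config.json (de)serialization.
from transformers import PretrainedConfig
class ToyConfig(PretrainedConfig):
    model_type = "toy"
    def __init__(self, hidden_size=64, num_layers=2, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.hidden_size = hidden_size
        self.num_layers = num_layers
# Round-trip: ToyConfig(hidden_size=128).to_dict()["hidden_size"] == 128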
| 696 | 1 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Union[str, Any] =get_tests_dir('fixtures/test_sentencepiece.model')
__lowerCAmelCase : List[Any] =get_tests_dir('fixtures/test_sentencepiece_bpe.model')
__lowerCAmelCase : Union[str, Any] ='pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = CamembertTokenizer
SCREAMING_SNAKE_CASE__ : Optional[Any] = CamembertTokenizerFast
SCREAMING_SNAKE_CASE__ : List[str] = True
SCREAMING_SNAKE_CASE__ : Dict = True
def __magic_name__( self :List[str] ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
__SCREAMING_SNAKE_CASE : Union[str, Any] = CamembertTokenizer(lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__( self :int ) -> str:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''<pad>'''
__SCREAMING_SNAKE_CASE : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __magic_name__( self :Dict ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCAmelCase__ ) , 1_004 )
def __magic_name__( self :Any ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 1_005 )
def __magic_name__( self :int ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = CamembertTokenizer(lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Optional[int] = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Dict = '''I was born in 92000, and this is falsé.'''
__SCREAMING_SNAKE_CASE : int = tokenizer.encode(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# <unk> tokens are not the same for `rust` as for `slow`.
# Because spm gives back the raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :str ) -> str:
if not self.test_rust_tokenizer:
return
__SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : str = '''I was born in 92000, and this is falsé.'''
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__( self :Union[str, Any] ) -> Optional[int]:
# fmt: off
__SCREAMING_SNAKE_CASE : int = {'''input_ids''': [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__SCREAMING_SNAKE_CASE : Any = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=lowerCAmelCase__ , )
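# Sketch of the slow/fast parity pattern the tests above rely on: encode the same string
# with both tokenizers and require identical ids, with and without special tokens.
from transformers import CamembertTokenizer, CamembertTokenizerFast
def check_parity(name: str, text: str) -> None:
    slow = CamembertTokenizer.from_pretrained(name)
    fast = CamembertTokenizerFast.from_pretrained(name)
    assert slow.encode(text) == fast.encode(text)
    assert slow.encode(text, add_special_tokens=False) == fast.encode(text, add_special_tokens=False)
# check_parity("camembert-base", "I was born in 92000, and this is falsé.")  # needs network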
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = '''transfo-xl'''
SCREAMING_SNAKE_CASE__ : List[str] = ['''mems''']
SCREAMING_SNAKE_CASE__ : List[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :str , lowerCAmelCase__ :Optional[int]=267_735 , lowerCAmelCase__ :Optional[int]=[20_000, 40_000, 200_000] , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :List[str]=1_024 , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :Tuple=64 , lowerCAmelCase__ :Union[str, Any]=4_096 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Dict=18 , lowerCAmelCase__ :Union[str, Any]=1_600 , lowerCAmelCase__ :Union[str, Any]=1_000 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=0 , lowerCAmelCase__ :Union[str, Any]=-1 , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :str="normal" , lowerCAmelCase__ :Tuple=0.01 , lowerCAmelCase__ :Union[str, Any]=0.01 , lowerCAmelCase__ :str=0.02 , lowerCAmelCase__ :Optional[Any]=1E-5 , lowerCAmelCase__ :Union[str, Any]=0 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
__SCREAMING_SNAKE_CASE : str = vocab_size
__SCREAMING_SNAKE_CASE : Tuple = []
self.cutoffs.extend(lowerCAmelCase__ )
if proj_share_all_but_first:
__SCREAMING_SNAKE_CASE : List[str] = [False] + [True] * len(self.cutoffs )
else:
__SCREAMING_SNAKE_CASE : Tuple = [False] + [False] * len(self.cutoffs )
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_embed
__SCREAMING_SNAKE_CASE : Tuple = d_head
__SCREAMING_SNAKE_CASE : Dict = d_inner
__SCREAMING_SNAKE_CASE : Optional[Any] = div_val
__SCREAMING_SNAKE_CASE : Optional[Any] = pre_lnorm
__SCREAMING_SNAKE_CASE : List[str] = n_layer
__SCREAMING_SNAKE_CASE : int = n_head
__SCREAMING_SNAKE_CASE : str = mem_len
__SCREAMING_SNAKE_CASE : Union[str, Any] = same_length
__SCREAMING_SNAKE_CASE : str = attn_type
__SCREAMING_SNAKE_CASE : Dict = clamp_len
__SCREAMING_SNAKE_CASE : Tuple = sample_softmax
__SCREAMING_SNAKE_CASE : Optional[int] = adaptive
__SCREAMING_SNAKE_CASE : int = dropout
__SCREAMING_SNAKE_CASE : Optional[Any] = dropatt
__SCREAMING_SNAKE_CASE : int = untie_r
__SCREAMING_SNAKE_CASE : Optional[int] = init
__SCREAMING_SNAKE_CASE : List[str] = init_range
__SCREAMING_SNAKE_CASE : Any = proj_init_std
__SCREAMING_SNAKE_CASE : List[str] = init_std
__SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon
super().__init__(eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __magic_name__( self :str ) -> int:
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def __magic_name__( self :Tuple , lowerCAmelCase__ :int ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
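# The max_position_embeddings property above advertises "no sequence length limit" by
# returning the sentinel -1 and rejecting writes. A stripped-down sketch of that
# read-only-property idiom:
class Unbounded:
    @property
    def max_position_embeddings(self) -> int:
        return -1  # sentinel: no sequence length limit
    @max_position_embeddings.setter
    def max_position_embeddings(self, value: int) -> None:
        raise NotImplementedError("this model has no sequence length limit")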
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : Tuple ={
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] =['PerceiverFeatureExtractor']
__lowerCAmelCase : str =['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =[
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : List[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
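# The _LazyModule above defers the heavy torch/vision imports until a name is first
# accessed. A hedged, minimal sketch of the same idea using PEP 562 module-level
# __getattr__ (the real _LazyModule is considerably more involved):
import importlib
_LAZY = {"PerceiverConfig": ".configuration_perceiver"}  # attribute name -> submodule
def __getattr__(name):
    if name in _LAZY:
        submodule = importlib.import_module(_LAZY[name], __package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")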
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Any ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''megatron-bert'''
def __init__( self :int , lowerCAmelCase__ :int=29_056 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=24 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :Optional[int]=4_096 , lowerCAmelCase__ :Optional[Any]="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :int=0.02 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Optional[int]="absolute" , lowerCAmelCase__ :List[str]=True , **lowerCAmelCase__ :Tuple , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_size
__SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : Any = intermediate_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : str = initializer_range
__SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
__SCREAMING_SNAKE_CASE : Dict = position_embedding_type
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
| 696 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] ) -> Any:
super().__init__()
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
@torch.no_grad()
def __call__( self :str , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :int = 100 , lowerCAmelCase__ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase__ :Optional[float] = None , lowerCAmelCase__ :bool = True , ) -> Union[AudioPipelineOutput, Tuple]:
if audio_length_in_s is None:
__SCREAMING_SNAKE_CASE : List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate
__SCREAMING_SNAKE_CASE : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate
__SCREAMING_SNAKE_CASE : Union[str, Any] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger than or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
__SCREAMING_SNAKE_CASE : int = int(lowerCAmelCase__ )
if sample_size % down_scale_factor != 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
''' process.''' )
__SCREAMING_SNAKE_CASE : Optional[int] = int(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = next(iter(self.unet.parameters() ) ).dtype
__SCREAMING_SNAKE_CASE : Any = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCAmelCase__ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__SCREAMING_SNAKE_CASE : Optional[int] = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=lowerCAmelCase__ )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase__ , device=audio.device )
__SCREAMING_SNAKE_CASE : Dict = self.scheduler.timesteps.to(lowerCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ ).sample
# 2. compute previous image: x_t -> t_t-1
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
__SCREAMING_SNAKE_CASE : Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
__SCREAMING_SNAKE_CASE : Optional[int] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCAmelCase__ )
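# Skeleton of the denoising loop the pipeline above runs, with descriptive names (a
# sketch of the pattern, not the diffusers API verbatim): each step predicts the noise
# and asks the scheduler for the previous, slightly less noisy sample.
def denoise(unet, scheduler, sample, num_inference_steps: int):
    scheduler.set_timesteps(num_inference_steps, device=sample.device)
    for t in scheduler.timesteps:
        model_output = unet(sample, t).sample  # 1. predict noise
        sample = scheduler.step(model_output, t, sample).prev_sample  # 2. x_t -> x_{t-1}
    return sample.clamp(-1, 1)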
| 696 |
import os
import sys
import unittest
__lowerCAmelCase : List[Any] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowerCAmelCase : Optional[Any] =os.path.join(git_repo_path, 'src', 'transformers')
__lowerCAmelCase : Optional[Any] ='\n{0} = None\n'
__lowerCAmelCase : Tuple ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
__lowerCAmelCase : Dict ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tokenizers''' )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tensorflow_text''' )
__SCREAMING_SNAKE_CASE : Tuple = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers''' )
__SCREAMING_SNAKE_CASE : Any = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tensorflow_text''' )
__SCREAMING_SNAKE_CASE : List[str] = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers_and_vision''' )
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('''torch''' , lowerCAmelCase__ )
self.assertIn('''tensorflow_text''' , lowerCAmelCase__ )
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCAmelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , '''\nCONSTANT = None\n''' )
__SCREAMING_SNAKE_CASE : List[str] = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
lowerCAmelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__SCREAMING_SNAKE_CASE : int = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , lowerCAmelCase__ )
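# A plausible sketch of the find_backend helper exercised above (an assumed
# implementation -- the real one lives in utils/check_dummies.py): it reads backend names
# out of `if not is_xxx_available():` guards and joins compound guards with "_and_".
import re
_single_backend = re.compile(r"^\s*if not is_([a-z_]+)_available\(\)")
_any_backend = re.compile(r"is_([a-z_]+)_available\(\)")
def find_backend_sketch(line: str):
    if "if not (" in line:
        return "_and_".join(_any_backend.findall(line))
    match = _single_backend.search(line)
    return match.groups()[0] if match else None
assert find_backend_sketch("    if not is_tokenizers_available():") == "tokenizers"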
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : Optional[Any] ={
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict =['OwlViTFeatureExtractor']
__lowerCAmelCase : List[Any] =['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =[
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCAmelCase : Dict =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 |
import math
from numpy import inf
from scipy.integrate import quad
def _UpperCamelCase ( lowercase__ ):
if num <= 0:
raise ValueError('''math domain error''' )
return quad(lowercase__ , 0 , lowercase__ , args=(lowercase__) )[0]
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return math.pow(lowercase__ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
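# Readable equivalent of the pair of functions above: Gamma(z) is the integral of
# x**(z - 1) * exp(-x) over [0, inf), evaluated numerically with scipy.integrate.quad.
# For positive integers, Gamma(n) = (n - 1)!.
import math
from numpy import inf
from scipy.integrate import quad
def gamma(z: float) -> float:
    if z <= 0:
        raise ValueError("math domain error")
    return quad(lambda x: math.pow(x, z - 1) * math.exp(-x), 0, inf)[0]
assert abs(gamma(5) - 24.0) < 1e-6  # Gamma(5) = 4! = 24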
| 696 | 1 |
from typing import Any
import numpy as np
def _UpperCamelCase ( lowercase__ ):
return np.array_equal(lowercase__ , matrix.conjugate().T )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = v.conjugate().T
__SCREAMING_SNAKE_CASE : List[str] = v_star.dot(lowercase__ )
assert isinstance(lowercase__ , np.ndarray )
return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Any = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F'''{a} is not hermitian.'''
print(rayleigh_quotient(lowercase__ , lowercase__ ) )
__SCREAMING_SNAKE_CASE : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F'''{a} is not hermitian.'''
assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
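# The same computation with descriptive names: for a Hermitian matrix A and nonzero
# vector v, the Rayleigh quotient R(A, v) = (v* A v) / (v* v) is real and lies between
# the smallest and largest eigenvalues of A.
import numpy as np
def rayleigh(a: np.ndarray, v: np.ndarray):
    v_star = v.conjugate().T
    return ((v_star @ a @ v) / (v_star @ v)).item()
a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
v = np.array([[1], [2], [3]])
assert np.isclose(rayleigh(a, v), 3.0)  # matches the expectation in tests() above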
| 696 |
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(lowercase__ , lowercase__ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
__SCREAMING_SNAKE_CASE : int = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
__SCREAMING_SNAKE_CASE : Union[str, Any] = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
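# Worked example for the formula above: EMI = P*r*(1 + r)**n / ((1 + r)**n - 1), where r
# is the monthly rate (annual rate / 12) and n the number of monthly payments (years * 12).
# Borrowing 25,000 at 8% per annum over 5 years gives r = 0.08 / 12 and n = 60:
monthly_rate = 0.08 / 12
payments = 5 * 12
emi = 25_000 * monthly_rate * (1 + monthly_rate) ** payments / ((1 + monthly_rate) ** payments - 1)
assert abs(emi - 506.91) < 0.01  # roughly 506.91 per month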
| 696 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) )
class _lowercase :
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier
__SCREAMING_SNAKE_CASE : Dict = min_depth
__SCREAMING_SNAKE_CASE : List[str] = tf_padding
__SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier )
__SCREAMING_SNAKE_CASE : List[str] = output_stride
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = scope
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __magic_name__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __magic_name__( self :List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __magic_name__( self :Any ) -> Dict:
pass
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : Optional[int] = 26
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :List[str] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor
__SCREAMING_SNAKE_CASE : int = prepare_img()
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
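# The forward-signature test above relies on inspect.signature preserving declaration
# order. A tiny self-contained illustration with a stand-in class (on an unbound method
# the first parameter is `self`; the test uses a bound method, so it checks index 0):
import inspect
class StubModel:
    def forward(self, pixel_values, labels=None):
        return pixel_values
arg_names = [*inspect.signature(StubModel.forward).parameters.keys()]
assert arg_names[1:2] == ["pixel_values"]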
| 696 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = prime_factors(lowercase__ )
if is_square_free(lowercase__ ):
return -1 if len(lowercase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
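# Readable restatement of the Mobius function above, assuming the same helpers
# (prime_factors, is_square_free) are importable: mu(n) = 1 for square-free n with an
# even number of prime factors, -1 with an odd number, and 0 when n is not square-free.
def mobius(number: int) -> int:
    factors = prime_factors(number)
    if not is_square_free(factors):
        return 0
    return -1 if len(factors) % 2 else 1
# mobius(1) == 1, mobius(4) == 0 (4 = 2*2), mobius(6) == 1 (2*3), mobius(30) == -1 (2*3*5)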
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase : Optional[int] ={
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =[
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =[
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : int =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase : int ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __magic_name__( cls :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :List[str] ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :Dict ) -> Optional[int]:
CustomConfig.register_for_auto_class()
__SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int
__SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float
__SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool
__SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = PretrainedConfig()
__SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
__SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
                ''' `test_configuration_common.config_common_kwargs`; pick another value for them:'''
f''' {', '.join(lowerCAmelCase__ )}.''' )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
with self.assertRaises(lowerCAmelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
__SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock()
__SCREAMING_SNAKE_CASE : List[Any] = 500
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError
__SCREAMING_SNAKE_CASE : str = {}
# Download this model to make sure it's in the cache.
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head:
__SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            # This checks that the fake head request was actually called
mock_head.assert_called()
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
__SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __magic_name__( self :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json''']
__SCREAMING_SNAKE_CASE : Tuple = 768
configuration.save_pretrained(lowerCAmelCase__ )
shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) )
__SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def __magic_name__( self :List[str] ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
__SCREAMING_SNAKE_CASE : int = '''v4.0.0'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase__ , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
__SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0'''
__SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(old_configuration.hidden_size , 768 )
| 696 | 1 |
def _UpperCamelCase ( lowercase__ ):
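    # Bottom-up DP counting the integer partitions of m; each cell combines "skip this part size" and "use it once more" counts.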
__SCREAMING_SNAKE_CASE : list[list[int]] = [[0 for _ in range(lowercase__ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__SCREAMING_SNAKE_CASE : int = 1
for n in range(m + 1 ):
for k in range(1 , lowercase__ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__lowerCAmelCase : Union[str, Any] =int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__lowerCAmelCase : Tuple =int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 696 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Any ={
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 | 1 |
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__SCREAMING_SNAKE_CASE : int = str(bin(lowercase__ ) )[2:] # remove the leading "0b"
__SCREAMING_SNAKE_CASE : str = str(bin(lowercase__ ) )[2:] # remove the leading "0b"
__SCREAMING_SNAKE_CASE : Optional[int] = max(len(lowercase__ ) , len(lowercase__ ) )
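    # Zero-pad both binary strings to the same width, then AND the corresponding bits.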
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(lowercase__ ) , b_binary.zfill(lowercase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Optional[int] = d_kv
__SCREAMING_SNAKE_CASE : Tuple = d_ff
__SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers
        # This tells us after how many encoder layers a sparse layer is inserted.
if self.num_sparse_encoder_layers > 0:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers
else:
__SCREAMING_SNAKE_CASE : Tuple = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us after how many decoder layers a sparse layer is inserted.
if self.num_sparse_decoder_layers > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers
__SCREAMING_SNAKE_CASE : List[Any] = num_heads
__SCREAMING_SNAKE_CASE : List[Any] = num_experts
__SCREAMING_SNAKE_CASE : Tuple = expert_capacity
__SCREAMING_SNAKE_CASE : List[Any] = router_bias
__SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
__SCREAMING_SNAKE_CASE : List[Any] = router_dtype
__SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens
__SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance
__SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate
__SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : int = initializer_factor
__SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj
__SCREAMING_SNAKE_CASE : Any = use_cache
__SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs
__SCREAMING_SNAKE_CASE : int = router_z_loss_coef
__SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef
__SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
__SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1]
__SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated'''
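        # e.g. "gated-gelu" -> activation "gelu" with gating enabled; a plain "relu" stays ungated.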
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new'''
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
| 696 | 1 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
class _lowercase :
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Dict ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = question_encoder
__SCREAMING_SNAKE_CASE : int = generator
__SCREAMING_SNAKE_CASE : str = self.question_encoder
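        # The question-encoder tokenizer is active by default; the two setters below switch between it and the generator.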
def __magic_name__( self :List[str] , lowerCAmelCase__ :int ) -> Any:
if os.path.isfile(lowerCAmelCase__ ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = os.path.join(lowerCAmelCase__ , '''question_encoder_tokenizer''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(lowerCAmelCase__ , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(lowerCAmelCase__ )
self.generator.save_pretrained(lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :str , lowerCAmelCase__ :str , **lowerCAmelCase__ :Union[str, Any] ) -> int:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
__SCREAMING_SNAKE_CASE : Any = kwargs.pop('''config''' , lowerCAmelCase__ )
if config is None:
__SCREAMING_SNAKE_CASE : Dict = RagConfig.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained(
lowerCAmelCase__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
__SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained(
lowerCAmelCase__ , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=lowerCAmelCase__ , generator=lowerCAmelCase__ )
def __call__( self :Dict , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Tuple ) -> str:
return self.current_tokenizer(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :List[str] , *lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[Any] ) -> List[str]:
return self.generator.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :Optional[int] , *lowerCAmelCase__ :int , **lowerCAmelCase__ :Tuple ) -> Tuple:
return self.generator.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE : Tuple = self.question_encoder
def __magic_name__( self :Union[str, Any] ) -> int:
__SCREAMING_SNAKE_CASE : List[Any] = self.generator
def __magic_name__( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[List[str]] = None , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :str = "longest" , lowerCAmelCase__ :str = None , lowerCAmelCase__ :bool = True , **lowerCAmelCase__ :Optional[int] , ) -> BatchEncoding:
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , lowerCAmelCase__ , )
if max_length is None:
__SCREAMING_SNAKE_CASE : Optional[int] = self.current_tokenizer.model_max_length
__SCREAMING_SNAKE_CASE : Dict = self(
lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , **lowerCAmelCase__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__SCREAMING_SNAKE_CASE : List[Any] = self.current_tokenizer.model_max_length
__SCREAMING_SNAKE_CASE : Union[str, Any] = self(
text_target=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Optional[Any] = labels['''input_ids''']
return model_inputs
| 696 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 696 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__lowerCAmelCase : int =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = '''gpt_neo'''
SCREAMING_SNAKE_CASE__ : int = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :str , lowerCAmelCase__ :Optional[Any]=50_257 , lowerCAmelCase__ :List[Any]=2_048 , lowerCAmelCase__ :Dict=2_048 , lowerCAmelCase__ :Union[str, Any]=24 , lowerCAmelCase__ :str=[[["global", "local"], 12]] , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Optional[Any]=256 , lowerCAmelCase__ :Union[str, Any]="gelu_new" , lowerCAmelCase__ :List[str]=0.0 , lowerCAmelCase__ :Any=0.0 , lowerCAmelCase__ :Optional[int]=0.0 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :Dict=1E-5 , lowerCAmelCase__ :Union[str, Any]=0.02 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Optional[Any]=50_256 , lowerCAmelCase__ :Tuple=50_256 , **lowerCAmelCase__ :str , ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
__SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[str] = hidden_size
__SCREAMING_SNAKE_CASE : int = num_layers
__SCREAMING_SNAKE_CASE : Optional[Any] = num_heads
__SCREAMING_SNAKE_CASE : int = intermediate_size
__SCREAMING_SNAKE_CASE : Optional[Any] = window_size
__SCREAMING_SNAKE_CASE : List[Any] = activation_function
__SCREAMING_SNAKE_CASE : Union[str, Any] = resid_dropout
__SCREAMING_SNAKE_CASE : Optional[Any] = embed_dropout
__SCREAMING_SNAKE_CASE : List[str] = attention_dropout
__SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
__SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : Dict = initializer_range
__SCREAMING_SNAKE_CASE : List[str] = use_cache
__SCREAMING_SNAKE_CASE : List[str] = bos_token_id
__SCREAMING_SNAKE_CASE : Dict = eos_token_id
__SCREAMING_SNAKE_CASE : Optional[Any] = attention_types
__SCREAMING_SNAKE_CASE : Optional[Any] = self.expand_attention_types_params(lowerCAmelCase__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
                '''Configuration for the attention module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
f'''`config.num_layers = {self.num_layers}`. '''
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''' )
super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@staticmethod
def __magic_name__( lowerCAmelCase__ :Dict ) -> List[Any]:
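        # Expands e.g. [[["global", "local"], 12]] into ["global", "local"] repeated 12 times (one entry per layer).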
__SCREAMING_SNAKE_CASE : Any = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
import torch
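    # Unfold-style slicing: gather windows of length `size` along `dimension` with the given stride, then move the window axis to the end.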
__SCREAMING_SNAKE_CASE : List[Any] = input.size()
__SCREAMING_SNAKE_CASE : List[str] = len(lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = shape[dimension]
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : Any = torch.div(sizedim - size , lowercase__ , rounding_mode='''floor''' ) + 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(lowercase__ ) + low_indices[:min_length][:, None]
__SCREAMING_SNAKE_CASE : str = [slice(lowercase__ )] * rank
__SCREAMING_SNAKE_CASE : int = indices
__SCREAMING_SNAKE_CASE : Dict = input[s]
__SCREAMING_SNAKE_CASE : Optional[int] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowercase__ )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
import torch
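    # Finds the largest candidate below the given bound that evenly divides the sequence length, plus the matching quotient (number of blocks).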
__SCREAMING_SNAKE_CASE : Any = torch.arange(1 , lowercase__ )
__SCREAMING_SNAKE_CASE : Any = torch.remainder(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = remainders == 0
__SCREAMING_SNAKE_CASE : Optional[Any] = candidates[divisor_indices]
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.max(lowercase__ )
return largest_divisor, torch.div(lowercase__ , lowercase__ , rounding_mode='''floor''' )
class _lowercase ( A__ ):
'''simple docstring'''
@property
def __magic_name__( self :Tuple ) -> Mapping[str, Mapping[int, str]]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction='''inputs''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE : int = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __magic_name__( self :List[str] ) -> int:
return self._config.num_heads
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :PreTrainedTokenizer , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
__SCREAMING_SNAKE_CASE : str = super(lowerCAmelCase__ , self ).generate_dummy_inputs(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
        # We need to order the inputs in the way they appear in the forward()
__SCREAMING_SNAKE_CASE : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE : Any = seqlen + 2
__SCREAMING_SNAKE_CASE : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
(torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) for _ in range(self.num_layers )
]
__SCREAMING_SNAKE_CASE : Optional[Any] = common_inputs['''attention_mask''']
if self.use_past:
__SCREAMING_SNAKE_CASE : int = ordered_inputs['''attention_mask'''].dtype
__SCREAMING_SNAKE_CASE : int = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ , dtype=lowerCAmelCase__ )] , dim=1 )
return ordered_inputs
@property
def __magic_name__( self :List[Any] ) -> int:
return 13
| 696 |
from datetime import datetime
import requests
def _UpperCamelCase ( lowercase__ ):
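    # Query the downloadgram API for the direct video source URL, then download and return the raw bytes.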
__SCREAMING_SNAKE_CASE : Optional[int] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
__SCREAMING_SNAKE_CASE : Tuple = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(lowercase__ ).content
if __name__ == "__main__":
__lowerCAmelCase : int =input('Enter Video/IGTV url: ').strip()
__lowerCAmelCase : Union[str, Any] =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 696 | 1 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowerCAmelCase : int =logging.get_logger(__name__)
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
            '''Loading a PyTorch model in Flax requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
__SCREAMING_SNAKE_CASE : int = os.path.abspath(lowercase__ )
logger.info(F'''Loading PyTorch weights from {pt_path}''' )
__SCREAMING_SNAKE_CASE : Any = torch.load(lowercase__ , map_location='''cpu''' )
logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
__SCREAMING_SNAKE_CASE : Optional[int] = convert_pytorch_state_dict_to_flax(lowercase__ , lowercase__ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
__SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_sharded_state_dict_to_flax(lowercase__ , lowercase__ )
return flax_state_dict
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
def is_key_or_prefix_key_in_dict(lowercase__ ) -> bool:
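        # Membership test that tolerates an optional model-prefix in front of the key.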
return len(set(lowercase__ ) & {key, (model_prefix,) + key} ) > 0
# layer norm
__SCREAMING_SNAKE_CASE : Optional[Any] = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(lowercase__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
__SCREAMING_SNAKE_CASE : Dict = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(lowercase__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
__SCREAMING_SNAKE_CASE : Union[str, Any] = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(lowercase__ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
__SCREAMING_SNAKE_CASE : Union[str, Any] = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(lowercase__ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
__SCREAMING_SNAKE_CASE : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__SCREAMING_SNAKE_CASE : str = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__SCREAMING_SNAKE_CASE : int = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__SCREAMING_SNAKE_CASE : Any = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
__SCREAMING_SNAKE_CASE : Dict = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
__SCREAMING_SNAKE_CASE : Optional[Any] = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
__SCREAMING_SNAKE_CASE : List[Any] = pt_tuple_key[-2] + '''_v'''
if name is not None:
__SCREAMING_SNAKE_CASE : Dict = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _UpperCamelCase ( lowercase__ , lowercase__ ):
# convert pytorch tensor to numpy
__SCREAMING_SNAKE_CASE : Any = {k: v.numpy() for k, v in pt_state_dict.items()}
__SCREAMING_SNAKE_CASE : Dict = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__SCREAMING_SNAKE_CASE : int = flax_model.params['''params''']
else:
__SCREAMING_SNAKE_CASE : int = flax_model.params
__SCREAMING_SNAKE_CASE : List[Any] = flatten_dict(lowercase__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__SCREAMING_SNAKE_CASE : Tuple = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = {}
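    # Detect head/base prefix mismatches between the PT checkpoint keys and the Flax model params.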
__SCREAMING_SNAKE_CASE : Dict = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
__SCREAMING_SNAKE_CASE : Any = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
__SCREAMING_SNAKE_CASE : List[str] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__SCREAMING_SNAKE_CASE : Any = pt_tuple_key[1:]
# Correctly rename weight parameters
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = rename_key_and_reshape_tensor(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# add model prefix if necessary
__SCREAMING_SNAKE_CASE : Tuple = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__SCREAMING_SNAKE_CASE : Optional[int] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__SCREAMING_SNAKE_CASE : List[Any] = jnp.asarray(lowercase__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowercase__ , lowercase__ )
continue
# also add unexpected weight so that warning is thrown
__SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.asarray(lowercase__ )
else:
# also add unexpected weight so that warning is thrown
__SCREAMING_SNAKE_CASE : int = jnp.asarray(lowercase__ )
return unflatten_dict(lowercase__ )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
import torch
# Load the index
__SCREAMING_SNAKE_CASE : Any = {}
for shard_file in shard_filenames:
        # load each PyTorch shard
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(lowercase__ )
__SCREAMING_SNAKE_CASE : str = {k: v.numpy() for k, v in pt_state_dict.items()}
__SCREAMING_SNAKE_CASE : Union[str, Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__SCREAMING_SNAKE_CASE : Optional[Any] = flax_model.params['''params''']
__SCREAMING_SNAKE_CASE : Tuple = flatten_dict(lowercase__ )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
__SCREAMING_SNAKE_CASE : str = flax_model.params
__SCREAMING_SNAKE_CASE : Optional[int] = flatten_dict(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
__SCREAMING_SNAKE_CASE : Optional[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__SCREAMING_SNAKE_CASE : Dict = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
__SCREAMING_SNAKE_CASE : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__SCREAMING_SNAKE_CASE : int = pt_tuple_key[1:]
# Correctly rename weight parameters
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = rename_key_and_reshape_tensor(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# add model prefix if necessary
__SCREAMING_SNAKE_CASE : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__SCREAMING_SNAKE_CASE : str = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__SCREAMING_SNAKE_CASE : Any = jnp.asarray(lowercase__ )
continue
if "var" in flax_key[-1]:
__SCREAMING_SNAKE_CASE : Tuple = jnp.asarray(lowercase__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowercase__ , lowercase__ )
continue
# also add unexpected weight so that warning is thrown
__SCREAMING_SNAKE_CASE : Dict = jnp.asarray(lowercase__ )
else:
# also add unexpected weight so that warning is thrown
__SCREAMING_SNAKE_CASE : str = jnp.asarray(lowercase__ )
return unflatten_dict(lowercase__ )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.abspath(lowercase__ )
logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
__SCREAMING_SNAKE_CASE : Any = getattr(lowercase__ , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(lowercase__ , '''rb''' ) as state_f:
try:
__SCREAMING_SNAKE_CASE : Union[str, Any] = from_bytes(lowercase__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(lowercase__ , lowercase__ )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
            '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
__SCREAMING_SNAKE_CASE : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda lowercase__ : x.dtype == jnp.bfloataa , lowercase__ ) ).values()
if any(lowercase__ ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = jax.tree_util.tree_map(
lambda lowercase__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = flatten_dict(lowercase__ )
__SCREAMING_SNAKE_CASE : str = pt_model.state_dict()
__SCREAMING_SNAKE_CASE : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
__SCREAMING_SNAKE_CASE : Optional[Any] = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Optional[Any] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__SCREAMING_SNAKE_CASE : Optional[Any] = flax_key_tuple[0] == pt_model.base_model_prefix
__SCREAMING_SNAKE_CASE : Any = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__SCREAMING_SNAKE_CASE : List[str] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__SCREAMING_SNAKE_CASE : Any = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(lowercase__ ) not in pt_model_dict:
# conv layer
__SCREAMING_SNAKE_CASE : Optional[Any] = flax_key_tuple[:-1] + ('''weight''',)
__SCREAMING_SNAKE_CASE : str = jnp.transpose(lowercase__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowercase__ ) not in pt_model_dict:
# linear layer
__SCREAMING_SNAKE_CASE : List[Any] = flax_key_tuple[:-1] + ('''weight''',)
__SCREAMING_SNAKE_CASE : Optional[int] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__SCREAMING_SNAKE_CASE : int = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__SCREAMING_SNAKE_CASE : Optional[int] = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
__SCREAMING_SNAKE_CASE : Dict = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
__SCREAMING_SNAKE_CASE : Dict = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = '''.'''.join(lowercase__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__SCREAMING_SNAKE_CASE : int = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__SCREAMING_SNAKE_CASE : int = key.split('''.''' )
__SCREAMING_SNAKE_CASE : Tuple = None
if key_components[-3::2] == ["parametrizations", "original0"]:
__SCREAMING_SNAKE_CASE : List[str] = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
__SCREAMING_SNAKE_CASE : Tuple = key_components[-2] + '''_v'''
if name is not None:
__SCREAMING_SNAKE_CASE : List[Any] = key_components[:-3] + [name]
__SCREAMING_SNAKE_CASE : Any = '''.'''.join(lowercase__ )
__SCREAMING_SNAKE_CASE : Any = key
if flax_key in special_pt_names:
__SCREAMING_SNAKE_CASE : List[Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
__SCREAMING_SNAKE_CASE : Optional[int] = np.asarray(lowercase__ ) if not isinstance(lowercase__ , np.ndarray ) else flax_tensor
__SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(lowercase__ )
# remove from missing keys
missing_keys.remove(lowercase__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(lowercase__ )
pt_model.load_state_dict(lowercase__ )
# re-transform missing_keys to list
__SCREAMING_SNAKE_CASE : List[str] = list(lowercase__ )
if len(lowercase__ ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(lowercase__ ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
''' use it for predictions and inference.''' )
else:
logger.warning(
F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
| 696 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__( self :int ) -> Optional[int]:
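        # Tiny UNet/VAE/CLIP components so the pipeline test runs quickly on CPU.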
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' )
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = '''french fries'''
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
__SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5
__SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(lowerCAmelCase__ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae''']
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
__SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
__SCREAMING_SNAKE_CASE : Dict = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Dict ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = self.get_inputs()
__SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : str = self.get_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = 0
def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None:
__SCREAMING_SNAKE_CASE : Dict = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __magic_name__( self :List[str] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.float16 )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __magic_name__( self :int ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) )
__SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix'''
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = output.images[0]
__SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 696 | 1 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _UpperCamelCase ( lowercase__ ):
if not is_accelerate_available():
return method
__SCREAMING_SNAKE_CASE : List[Any] = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase__ ) < version.parse('''0.17.0''' ):
return method
def wrapper(self , *lowercase__ , **lowercase__ ):
if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
self._hf_hook.pre_forward(self )
return method(self , *lowercase__ , **lowercase__ )
return wrapper
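# A self-contained sketch of the version gate above: `base_version` strips pre-release
# suffixes, so an accelerate version such as '0.17.0.dev0' is already treated as 0.17.0
# and receives the hook-aware wrapper.
if __name__ == "__main__":
print(version.parse(version.parse('''0.17.0.dev0''' ).base_version ) >= version.parse('''0.17.0''' ) ) # True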
| 696 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : torch.FloatTensor
SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None
def _UpperCamelCase ( lowercase__ , lowercase__=0.999 , lowercase__="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
__SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = i / num_diffusion_timesteps
__SCREAMING_SNAKE_CASE : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) )
return torch.tensor(lowercase__ , dtype=torch.float32 )
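# Worked numbers for the cosine schedule above (pure math, independent of the classes
# below): alpha_bar(0) = cos(0.008 / 1.008 * pi / 2) ** 2 ~ 0.99984, so the first beta,
# 1 - alpha_bar(1 / T) / alpha_bar(0), is roughly 4e-5 for T = 1_000 and stays far
# below the 0.999 clip applied via min().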
class _lowercase ( A__ , A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 1
@register_to_config
def __init__( self :Dict , lowerCAmelCase__ :int = 1_000 , lowerCAmelCase__ :float = 0.0001 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :str = "linear" , lowerCAmelCase__ :Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :str = "epsilon" , lowerCAmelCase__ :float = 1.0 , **lowerCAmelCase__ :int , ) -> Union[str, Any]:
if kwargs.get('''set_alpha_to_one''' , lowerCAmelCase__ ) is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
__SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.float32 )
elif beta_schedule == "linear":
__SCREAMING_SNAKE_CASE : str = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__SCREAMING_SNAKE_CASE : List[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.float32 ) ** 2
)
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__SCREAMING_SNAKE_CASE : Optional[Any] = betas_for_alpha_bar(lowerCAmelCase__ )
else:
raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = 1.0 - self.betas
__SCREAMING_SNAKE_CASE : int = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted DDIM we look at the next alphas_cumprod.
# For the final step there is no next alphas_cumprod, and the index would be out of bounds.
# `set_alpha_to_zero` decides whether we simply set this parameter to zero
# (in which case self.step() just outputs the predicted noise)
# or whether we use the final alpha of the "non-previous" one.
__SCREAMING_SNAKE_CASE : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
__SCREAMING_SNAKE_CASE : Any = 1.0
# setable values
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.arange(0 , lowerCAmelCase__ ).copy().astype(np.int64 ) )
def __magic_name__( self :List[str] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :Optional[int] = None ) -> torch.FloatTensor:
return sample
def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, torch.device] = None ) -> List[str]:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
f''' maximal {self.config.num_train_timesteps} timesteps.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_inference_steps
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__SCREAMING_SNAKE_CASE : Optional[int] = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round().copy().astype(np.int64 )
__SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.timesteps += self.config.steps_offset
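# Worked example for set_timesteps() above: with num_train_timesteps=1_000 and
# num_inference_steps=10, step_ratio is 100 and the inverted schedule runs
# [0, 100, ..., 900] (shifted by any steps_offset).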
def __magic_name__( self :Tuple , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
__SCREAMING_SNAKE_CASE : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
__SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod[timestep]
__SCREAMING_SNAKE_CASE : str = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
__SCREAMING_SNAKE_CASE : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
__SCREAMING_SNAKE_CASE : List[Any] = model_output
elif self.config.prediction_type == "sample":
__SCREAMING_SNAKE_CASE : List[str] = model_output
__SCREAMING_SNAKE_CASE : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
__SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
__SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
__SCREAMING_SNAKE_CASE : Dict = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
def __len__( self :Optional[int] ) -> List[Any]:
return self.config.num_train_timesteps
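# A minimal numeric sketch of the "epsilon" branch of step() above: hand-picked
# alpha-bar values replace a real schedule, and a random tensor stands in for the UNet
# noise prediction (formula (12) of https://arxiv.org/pdf/2010.02502.pdf, applied in the
# inverted direction, so alpha_prod_t_prev is the noisier level).
if __name__ == "__main__":
sample = torch.randn(4 )
model_output = torch.randn(4 ) # stand-in for the predicted noise
alpha_prod_t, alpha_prod_t_prev = 0.98, 0.95
pred_original_sample = (sample - (1 - alpha_prod_t) ** 0.5 * model_output) / alpha_prod_t ** 0.5
prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + (1 - alpha_prod_t_prev) ** 0.5 * model_output
print(prev_sample )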
| 696 | 1 |
class _lowercase :
'''simple docstring'''
def __init__( self :Dict , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = graph
self._normalize_graph(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = len(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = None
def __magic_name__( self :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple ) -> Any:
if isinstance(sources , int ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = [sources]
if isinstance(sinks , int ):
__SCREAMING_SNAKE_CASE : int = [sinks]
if len(lowerCAmelCase__ ) == 0 or len(lowerCAmelCase__ ) == 0:
return
__SCREAMING_SNAKE_CASE : List[Any] = sources[0]
__SCREAMING_SNAKE_CASE : Tuple = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(lowerCAmelCase__ ) > 1 or len(lowerCAmelCase__ ) > 1:
__SCREAMING_SNAKE_CASE : Tuple = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__SCREAMING_SNAKE_CASE : str = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__SCREAMING_SNAKE_CASE : Optional[int] = max_input_flow
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : Tuple = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__SCREAMING_SNAKE_CASE : Optional[int] = max_input_flow
__SCREAMING_SNAKE_CASE : Any = size - 1
def __magic_name__( self :List[Any] ) -> str:
if self.maximum_flow_algorithm is None:
raise Exception('''You need to set maximum flow algorithm before.''' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __magic_name__( self :Dict , lowerCAmelCase__ :int ) -> str:
__SCREAMING_SNAKE_CASE : str = algorithm(self )
class _lowercase :
'''simple docstring'''
def __init__( self :Tuple , lowerCAmelCase__ :Any ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[str] = flow_network
__SCREAMING_SNAKE_CASE : Any = flow_network.verticesCount
__SCREAMING_SNAKE_CASE : int = flow_network.sourceIndex
__SCREAMING_SNAKE_CASE : Dict = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__SCREAMING_SNAKE_CASE : List[Any] = flow_network.graph
__SCREAMING_SNAKE_CASE : int = False
def __magic_name__( self :str ) -> str:
if not self.executed:
self._algorithm()
__SCREAMING_SNAKE_CASE : Optional[Any] = True
def __magic_name__( self :Optional[int] ) -> Optional[Any]:
pass
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Optional[int] , lowerCAmelCase__ :Optional[Any] ) -> Tuple:
super().__init__(lowerCAmelCase__ )
# use this to save your result
__SCREAMING_SNAKE_CASE : str = -1
def __magic_name__( self :str ) -> str:
if not self.executed:
raise Exception('''You should execute algorithm before using its result!''' )
return self.maximum_flow
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :str , lowerCAmelCase__ :List[str] ) -> List[str]:
super().__init__(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = [[0] * self.verticies_count for i in range(self.verticies_count )]
__SCREAMING_SNAKE_CASE : List[Any] = [0] * self.verticies_count
__SCREAMING_SNAKE_CASE : List[str] = [0] * self.verticies_count
def __magic_name__( self :Any ) -> str:
__SCREAMING_SNAKE_CASE : Tuple = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__SCREAMING_SNAKE_CASE : int = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__SCREAMING_SNAKE_CASE : List[str] = 0
while i < len(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = vertices_list[i]
__SCREAMING_SNAKE_CASE : int = self.heights[vertex_index]
self.process_vertex(lowerCAmelCase__ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : int = 0
else:
i += 1
__SCREAMING_SNAKE_CASE : Dict = sum(self.preflow[self.source_index] )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple ) -> Optional[Any]:
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(lowerCAmelCase__ , lowerCAmelCase__ )
self.relabel(lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Tuple ) -> Any:
__SCREAMING_SNAKE_CASE : Tuple = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __magic_name__( self :Tuple , lowerCAmelCase__ :List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE : str = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__SCREAMING_SNAKE_CASE : Dict = self.heights[to_index]
if min_height is not None:
__SCREAMING_SNAKE_CASE : Dict = min_height + 1
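# Worked expectation for the demo below (4 vertices, source 0, sink 3): the only
# source-to-sink chain is 0 -> 1 (capacity 7) -> 2 (capacity 6) -> 3 (capacity 8),
# so the bottleneck edge 1 -> 2 limits the maximum flow to 6.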
if __name__ == "__main__":
__lowerCAmelCase : str =[0] # entrances (source vertices)
__lowerCAmelCase : List[str] =[3] # exits (sink vertices)
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : List[Any] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # capacity matrix (graph)
# prepare our network
__lowerCAmelCase : Any =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : str =flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 696 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
SCREAMING_SNAKE_CASE__ : str = "text"
SCREAMING_SNAKE_CASE__ : str = "summary"
@property
def __magic_name__( self :Union[str, Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
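# Usage sketch (assuming the upstream datasets API, where this template is exposed as
# `Summarization` with `text_column` / `summary_column` fields):
#
# task = Summarization(text_column='article', summary_column='highlights')
# task.column_mapping # {'article': 'text', 'highlights': 'summary'}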
| 696 | 1 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class _lowercase :
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any=13 , lowerCAmelCase__ :Union[str, Any]=7 , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=False , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :str=99 , lowerCAmelCase__ :Any=64 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :int=64 , lowerCAmelCase__ :Union[str, Any]="gelu" , lowerCAmelCase__ :Any=0.1 , lowerCAmelCase__ :Optional[int]=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :List[Any]=16 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :str=0.02 , lowerCAmelCase__ :int=3 , lowerCAmelCase__ :Tuple=4 , lowerCAmelCase__ :List[str]=None , ) -> List[Any]:
__SCREAMING_SNAKE_CASE : int = parent
__SCREAMING_SNAKE_CASE : List[Any] = batch_size
__SCREAMING_SNAKE_CASE : Optional[Any] = seq_length
__SCREAMING_SNAKE_CASE : Tuple = is_training
__SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask
__SCREAMING_SNAKE_CASE : Optional[Any] = use_token_type_ids
__SCREAMING_SNAKE_CASE : int = use_labels
__SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_size
__SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
__SCREAMING_SNAKE_CASE : str = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : str = max_position_embeddings
__SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : List[str] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : List[str] = num_choices
__SCREAMING_SNAKE_CASE : Any = scope
def __magic_name__( self :Dict ) -> Tuple:
return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' )
def __magic_name__( self :Union[str, Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__( self :int ) -> Dict:
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __magic_name__( self :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] ) -> int:
__SCREAMING_SNAKE_CASE : Optional[Any] = MPNetModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__( self :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = MPNetForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : int = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__( self :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[int] = MPNetForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :int , lowerCAmelCase__ :Any , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices
__SCREAMING_SNAKE_CASE : str = MPNetForMultipleChoice(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : Tuple = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__( self :Dict , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : List[str] = self.num_labels
__SCREAMING_SNAKE_CASE : List[str] = MPNetForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) : Union[str, Any] = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Tuple = (
{
'''feature-extraction''': MPNetModel,
'''fill-mask''': MPNetForMaskedLM,
'''question-answering''': MPNetForQuestionAnswering,
'''text-classification''': MPNetForSequenceClassification,
'''token-classification''': MPNetForTokenClassification,
'''zero-shot''': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : List[str] = True
def __magic_name__( self :Optional[int] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Any = MPNetModelTester(self )
__SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def __magic_name__( self :str ) -> Dict:
self.config_tester.run_common_tests()
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*lowerCAmelCase__ )
def __magic_name__( self :Dict ) -> int:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowerCAmelCase__ )
def __magic_name__( self :Dict ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowerCAmelCase__ )
def __magic_name__( self :Any ) -> str:
__SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*lowerCAmelCase__ )
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[str] = MPNetModel.from_pretrained('''microsoft/mpnet-base''' )
__SCREAMING_SNAKE_CASE : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
__SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
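# Note on the comparison above (a sketch of the tolerance arithmetic): torch.allclose
# with atol=1e-4 accepts |output - expected| <= 1e-4 + rtol * |expected| elementwise
# (default rtol is 1e-5), so a computed value of -0.05508 still matches the stored -0.0550.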
| 696 |
def _UpperCamelCase ( lowercase__ = 10**9 ):
__SCREAMING_SNAKE_CASE : List[str] = 1
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Dict = 0
__SCREAMING_SNAKE_CASE : Any = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__SCREAMING_SNAKE_CASE : Dict = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
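# Context for the recurrence above (Project Euler problem 94): the loop walks successive
# almost-equilateral Heronian triangles; the first few are (5, 5, 6), (17, 17, 16) and
# (65, 65, 66), contributing perimeters 16, 50 and 196 to the running sum.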
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
__lowerCAmelCase : str ={
'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class _lowercase ( A__ , A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''dinat'''
SCREAMING_SNAKE_CASE__ : Dict = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self :Tuple , lowerCAmelCase__ :List[str]=4 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :Tuple=64 , lowerCAmelCase__ :Optional[Any]=[3, 4, 6, 5] , lowerCAmelCase__ :Optional[int]=[2, 4, 8, 16] , lowerCAmelCase__ :Dict=7 , lowerCAmelCase__ :Dict=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCAmelCase__ :Any=3.0 , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :List[str]=0.0 , lowerCAmelCase__ :List[Any]=0.0 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Any="gelu" , lowerCAmelCase__ :List[Any]=0.02 , lowerCAmelCase__ :Union[str, Any]=1E-5 , lowerCAmelCase__ :List[str]=0.0 , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :List[str]=None , **lowerCAmelCase__ :str , ) -> Any:
super().__init__(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = patch_size
__SCREAMING_SNAKE_CASE : str = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
__SCREAMING_SNAKE_CASE : Union[str, Any] = depths
__SCREAMING_SNAKE_CASE : Dict = len(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = num_heads
__SCREAMING_SNAKE_CASE : Tuple = kernel_size
__SCREAMING_SNAKE_CASE : Any = dilations
__SCREAMING_SNAKE_CASE : Dict = mlp_ratio
__SCREAMING_SNAKE_CASE : Tuple = qkv_bias
__SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Any = drop_path_rate
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
__SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
__SCREAMING_SNAKE_CASE : List[str] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__SCREAMING_SNAKE_CASE : str = int(embed_dim * 2 ** (len(lowerCAmelCase__ ) - 1) )
__SCREAMING_SNAKE_CASE : Optional[Any] = layer_scale_init_value
__SCREAMING_SNAKE_CASE : Any = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(lowerCAmelCase__ ) + 1 )]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
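# Configuration sketch (hypothetical values; mirrors the defaults above, where the
# class is published upstream as `DinatConfig`): with embed_dim=64 and four stages the
# derived hidden_size is 64 * 2 ** 3 == 512, the channel width after the last stage.
#
# config = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5], out_features=['stage2', 'stage4'])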
| 696 |
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = len(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = len(lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__SCREAMING_SNAKE_CASE : str = True
for i in range(lowercase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
if a[i].islower():
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
return dp[n][m]
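# Worked example for the recurrence above: with a = 'daBcd' and b = 'ABC', the leading
# lowercase 'd' is dropped, 'a' and 'c' are capitalized to match 'A' and 'C', 'B'
# matches directly and the trailing 'd' is dropped, so the function returns True.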
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] ={
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = '''levit'''
def __init__( self :Union[str, Any] , lowerCAmelCase__ :Union[str, Any]=224 , lowerCAmelCase__ :Union[str, Any]=3 , lowerCAmelCase__ :int=3 , lowerCAmelCase__ :Optional[int]=2 , lowerCAmelCase__ :Union[str, Any]=1 , lowerCAmelCase__ :Optional[int]=16 , lowerCAmelCase__ :Tuple=[128, 256, 384] , lowerCAmelCase__ :Dict=[4, 8, 12] , lowerCAmelCase__ :str=[4, 4, 4] , lowerCAmelCase__ :Any=[16, 16, 16] , lowerCAmelCase__ :List[str]=0 , lowerCAmelCase__ :int=[2, 2, 2] , lowerCAmelCase__ :List[Any]=[2, 2, 2] , lowerCAmelCase__ :Dict=0.02 , **lowerCAmelCase__ :List[Any] , ) -> Optional[int]:
super().__init__(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Any = num_channels
__SCREAMING_SNAKE_CASE : Optional[Any] = kernel_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = stride
__SCREAMING_SNAKE_CASE : List[str] = padding
__SCREAMING_SNAKE_CASE : List[Any] = hidden_sizes
__SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : List[Any] = depths
__SCREAMING_SNAKE_CASE : Any = key_dim
__SCREAMING_SNAKE_CASE : str = drop_path_rate
__SCREAMING_SNAKE_CASE : int = patch_size
__SCREAMING_SNAKE_CASE : str = attention_ratio
__SCREAMING_SNAKE_CASE : Union[str, Any] = mlp_ratio
__SCREAMING_SNAKE_CASE : Tuple = initializer_range
__SCREAMING_SNAKE_CASE : Any = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = version.parse('''1.11''' )
@property
def __magic_name__( self :int ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __magic_name__( self :int ) -> float:
return 1E-4
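# Export sketch (assumes the transformers.onnx CLI that consumes this config; upstream
# the class is `LevitOnnxConfig`): the single dynamic-shaped 'pixel_values' input and
# the 1e-4 validation tolerance above drive an export along the lines of
#
# python -m transformers.onnx --model=facebook/levit-128S --feature=image-classification onnx/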
| 696 |
from scipy.stats import pearsonr
import datasets
__lowerCAmelCase : str ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__lowerCAmelCase : Tuple ='\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but also returning the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
__lowerCAmelCase : Optional[int] ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Optional[int] ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=False ) -> int:
if return_pvalue:
__SCREAMING_SNAKE_CASE : int = pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )[0] )}
| 696 | 1 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowerCAmelCase : Dict =logging.get_logger(__name__)
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Tuple , *lowerCAmelCase__ :str , **lowerCAmelCase__ :List[Any] ) -> None:
warnings.warn(
'''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PerceiverImageProcessor instead.''' , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 696 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : int ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : int ={
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
__lowerCAmelCase : Optional[int] ={'mobilebert-uncased': 5_1_2}
__lowerCAmelCase : Union[str, Any] ={}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[Any] = MobileBertTokenizer
def __init__( self :Tuple , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]="[UNK]" , lowerCAmelCase__ :List[Any]="[SEP]" , lowerCAmelCase__ :List[Any]="[PAD]" , lowerCAmelCase__ :List[Any]="[CLS]" , lowerCAmelCase__ :Any="[MASK]" , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :List[str] , ) -> Optional[Any]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars
):
__SCREAMING_SNAKE_CASE : int = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case
__SCREAMING_SNAKE_CASE : str = strip_accents
__SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars
__SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = do_lower_case
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any]=None ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
__SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
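# Layout sketch for the two methods above (token ids illustrative): a sequence pair is
# rendered as [CLS] A ... [SEP] B ... [SEP], with token_type_ids 0 over the first
# segment (both leading special tokens included) and 1 over the second segment plus
# its closing [SEP].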
| 696 | 1 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
__lowerCAmelCase : int ='https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
__lowerCAmelCase : List[str] =requests.get(url, headers={'User-Agent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_0_0_0_0):
out_file.write(data)
__lowerCAmelCase : List[Any] =BeautifulSoup(res.text, 'html.parser')
__lowerCAmelCase : Dict =list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get("href")}""")
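# Note: Google's result markup changes often, so the '.eZt8xd' class selector is
# brittle; a more defensive sketch (hypothetical selector, not from the original)
# would fall back to result anchors, e.g. soup.select('a[href^="/url?q="]')[:5].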
| 696 |
import os
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = len(grid[0] )
__SCREAMING_SNAKE_CASE : str = len(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = 0
__SCREAMING_SNAKE_CASE : Any = 0
__SCREAMING_SNAKE_CASE : Dict = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(lowercase__ ):
for j in range(n_rows - 3 ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
__SCREAMING_SNAKE_CASE : Union[str, Any] = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
__SCREAMING_SNAKE_CASE : Any = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
__SCREAMING_SNAKE_CASE : Any = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
__SCREAMING_SNAKE_CASE : Optional[int] = max(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if max_product > largest:
__SCREAMING_SNAKE_CASE : Tuple = max_product
return largest
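# For the 20x20 grid that ships with Project Euler problem 11 (grid.txt), the greatest
# product of four adjacent numbers is 70600674 = 89 * 94 * 97 * 87, found on a diagonal.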
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Optional[int] = []
with open(os.path.dirname(lowercase__ ) + '''/grid.txt''' ) as file:
for line in file:
grid.append(line.strip('''\n''' ).split(''' ''' ) )
__SCREAMING_SNAKE_CASE : str = [[int(lowercase__ ) for i in grid[j]] for j in range(len(lowercase__ ) )]
return largest_product(lowercase__ )
if __name__ == "__main__":
print(solution())
| 696 | 1 |
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : str = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = 0
while number > 0:
__SCREAMING_SNAKE_CASE : List[str] = number % 10
sum_of_digits += last_digit
__SCREAMING_SNAKE_CASE : Dict = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def _UpperCamelCase ( lowercase__ = 100 ):
__SCREAMING_SNAKE_CASE : Any = factorial(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = split_and_add(lowercase__ )
return result
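# Worked example: factorial(10) = 3628800 and split_and_add(3628800) = 3+6+2+8+8+0+0 = 27;
# for the default n = 100 the digit sum of 100! is 648 (Project Euler problem 20).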
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 696 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) )
class _lowercase :
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier
__SCREAMING_SNAKE_CASE : Dict = min_depth
__SCREAMING_SNAKE_CASE : List[str] = tf_padding
__SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier )
__SCREAMING_SNAKE_CASE : List[str] = output_stride
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = scope
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
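# Shape sketch with the tester defaults above: image_size=32 and output_stride=32 give
# a 1x1 feature map, and last_hidden_size = int(1_024 * 0.25) = 256 channels, so the
# expected tensor is (13, 256, 1, 1) for the default batch_size of 13.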
def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __magic_name__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __magic_name__( self :List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __magic_name__( self :Any ) -> Dict:
pass
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : Optional[int] = 26
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :List[str] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor
__SCREAMING_SNAKE_CASE : int = prepare_img()
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 696 | 1 |
def _UpperCamelCase ( lowercase__ = 600851475143 ):
try:
__SCREAMING_SNAKE_CASE : Optional[int] = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1
__SCREAMING_SNAKE_CASE : Tuple = 2
while i * i <= n:
while n % i == 0:
__SCREAMING_SNAKE_CASE : int = i
n //= i
i += 1
if n > 1:
__SCREAMING_SNAKE_CASE : Optional[Any] = n
return int(lowercase__ )
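# Worked example: 600851475143 = 71 * 839 * 1471 * 6857; the trial division above
# strips 71, 839 and 1471, leaves n = 6857 > 1 after the loop, and therefore returns
# 6857 (the Project Euler problem 3 answer).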
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 |
import os
from datetime import datetime as dt
from github import Github
__lowerCAmelCase : List[Any] =[
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def _UpperCamelCase ( ):
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
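# Timing summary (added): an issue at least 30 days old is marked stale after
# 23+ days without activity, and is closed once the bot's own notification has
# itself gone unanswered for a further 7+ days.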
if __name__ == "__main__":
    _UpperCamelCase()
| 696 | 1 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key ( k ):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
def convert_pegasus ( tf_weights , cfg_updates ):
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    mapping['''shared.weight'''][cfg.pad_token_id] = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
    mapping['''encoder.embed_tokens.weight'''] = mapping['''shared.weight''']
    mapping['''decoder.embed_tokens.weight'''] = mapping['''shared.weight''']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing , extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
    ]
    assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], F'''no matches found for the following tf keys {extra}'''
    return torch_model
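# Note (added): the `.T` above for dense/proj weights is needed because TF
# stores kernels as (in_features, out_features) while torch.nn.Linear.weight is
# (out_features, in_features); e.g. a (768, 3072) TF kernel becomes a
# (3072, 768) torch weight.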
def get_tf_weights_as_numpy ( path="./ckpt/aeslc/model.ckpt-32000" ):
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['''Adafactor''', '''global_step''']
    for name, shape in tqdm(init_vars , desc='''converting tf checkpoint to dict''' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch ( ckpt_path , save_dir ):
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[F'''summarization_{dataset}''']['''max_position_embeddings''']
    tok = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[F'''summarization_{dataset}''']
    if dataset == "large":
        cfg_updates['''task_specific_params'''] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('''model.decoder.embed_positions.weight''' )
    sd.pop('''model.encoder.embed_positions.weight''' )
    torch.save(sd , Path(save_dir ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = '''canine'''
def __init__( self :Any , lowerCAmelCase__ :List[Any]=768 , lowerCAmelCase__ :Any=12 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=3_072 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :int=16_384 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :List[Any]=0.02 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :List[Any]=0xe000 , lowerCAmelCase__ :List[str]=0xe001 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :Union[str, Any]=8 , lowerCAmelCase__ :Optional[int]=16_384 , lowerCAmelCase__ :Any=128 , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
__SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = initializer_range
__SCREAMING_SNAKE_CASE : int = type_vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
# Character config:
__SCREAMING_SNAKE_CASE : Tuple = downsampling_rate
__SCREAMING_SNAKE_CASE : Optional[Any] = upsampling_kernel_size
__SCREAMING_SNAKE_CASE : Any = num_hash_functions
__SCREAMING_SNAKE_CASE : Optional[int] = num_hash_buckets
__SCREAMING_SNAKE_CASE : List[str] = local_transformer_stride
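# Hedged usage sketch (added), assuming this class mirrors the upstream
# CanineConfig: the character-level knobs can be overridden at construction
# time, e.g.
# from transformers import CanineConfig
# config = CanineConfig(downsampling_rate=4, num_hash_buckets=16_384)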
| 696 | 1 |
from __future__ import annotations
def depth_first_search ( possible_board , diagonal_right_collisions , diagonal_left_collisions , boards , n , ):
    row = len(possible_board )
    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n ):
        # We apply what we learned previously. First we check that the current
        # board (possible_board) does not already contain this column value,
        # because if it does there is a vertical collision. Then we apply the two
        # formulas we learned before:
#
        # 45º: y - x = b or 45º: row - col = b
# 135º: y + x = b or row + col = b.
#
        # And we verify that the results of these two formulas do not already exist
        # in their respective variables (diagonal_right_collisions, diagonal_left_collisions)
#
        # If any of these are True it means there is a collision, so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If it is False we call the dfs function again and update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
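# Worked example (added): a queen at (row=1, col=3) owns the "\" diagonal with
# row - col = -2 and the "/" diagonal with row + col = 4; any later (row, col)
# reproducing either constant collides with it.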
def n_queens_solution ( n ):
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
# Print all the boards
for board in boards:
for column in board:
            print(column )
print('''''' )
    print(len(boards ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = '''transfo-xl'''
SCREAMING_SNAKE_CASE__ : List[str] = ['''mems''']
SCREAMING_SNAKE_CASE__ : List[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :str , lowerCAmelCase__ :Optional[int]=267_735 , lowerCAmelCase__ :Optional[int]=[20_000, 40_000, 200_000] , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :List[str]=1_024 , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :Tuple=64 , lowerCAmelCase__ :Union[str, Any]=4_096 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Dict=18 , lowerCAmelCase__ :Union[str, Any]=1_600 , lowerCAmelCase__ :Union[str, Any]=1_000 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=0 , lowerCAmelCase__ :Union[str, Any]=-1 , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :str="normal" , lowerCAmelCase__ :Tuple=0.01 , lowerCAmelCase__ :Union[str, Any]=0.01 , lowerCAmelCase__ :str=0.02 , lowerCAmelCase__ :Optional[Any]=1E-5 , lowerCAmelCase__ :Union[str, Any]=0 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
__SCREAMING_SNAKE_CASE : str = vocab_size
        self.cutoffs = []
self.cutoffs.extend(lowerCAmelCase__ )
if proj_share_all_but_first:
__SCREAMING_SNAKE_CASE : List[str] = [False] + [True] * len(self.cutoffs )
else:
__SCREAMING_SNAKE_CASE : Tuple = [False] + [False] * len(self.cutoffs )
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_embed
__SCREAMING_SNAKE_CASE : Tuple = d_head
__SCREAMING_SNAKE_CASE : Dict = d_inner
__SCREAMING_SNAKE_CASE : Optional[Any] = div_val
__SCREAMING_SNAKE_CASE : Optional[Any] = pre_lnorm
__SCREAMING_SNAKE_CASE : List[str] = n_layer
__SCREAMING_SNAKE_CASE : int = n_head
__SCREAMING_SNAKE_CASE : str = mem_len
__SCREAMING_SNAKE_CASE : Union[str, Any] = same_length
__SCREAMING_SNAKE_CASE : str = attn_type
__SCREAMING_SNAKE_CASE : Dict = clamp_len
__SCREAMING_SNAKE_CASE : Tuple = sample_softmax
__SCREAMING_SNAKE_CASE : Optional[int] = adaptive
__SCREAMING_SNAKE_CASE : int = dropout
__SCREAMING_SNAKE_CASE : Optional[Any] = dropatt
__SCREAMING_SNAKE_CASE : int = untie_r
__SCREAMING_SNAKE_CASE : Optional[int] = init
__SCREAMING_SNAKE_CASE : List[str] = init_range
__SCREAMING_SNAKE_CASE : Any = proj_init_std
__SCREAMING_SNAKE_CASE : List[str] = init_std
__SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon
super().__init__(eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
    def max_position_embeddings( self :str ) -> int:
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self :Tuple , lowerCAmelCase__ :int ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 696 | 1 |
import re
def _UpperCamelCase ( dna ):
    if len(re.findall('''[ATCG]''' , dna ) ) != len(dna ):
        raise ValueError('''Invalid Strand''' )
    return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
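# Example (added): the complement of "ATCG" is "TAGC"; the strand is
# complemented in place, not reversed.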
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Any ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''megatron-bert'''
def __init__( self :int , lowerCAmelCase__ :int=29_056 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=24 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :Optional[int]=4_096 , lowerCAmelCase__ :Optional[Any]="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :int=0.02 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Optional[int]="absolute" , lowerCAmelCase__ :List[str]=True , **lowerCAmelCase__ :Tuple , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_size
__SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : Any = intermediate_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : str = initializer_range
__SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
__SCREAMING_SNAKE_CASE : Dict = position_embedding_type
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
| 696 | 1 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple=13 , lowerCAmelCase__ :Optional[int]=30 , lowerCAmelCase__ :Tuple=2 , lowerCAmelCase__ :int=3 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=32 , lowerCAmelCase__ :Any=5 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :List[Any]=37 , lowerCAmelCase__ :Optional[int]="gelu" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :List[str]=10 , lowerCAmelCase__ :Optional[int]=0.02 , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Tuple = parent
__SCREAMING_SNAKE_CASE : str = batch_size
__SCREAMING_SNAKE_CASE : Optional[Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
__SCREAMING_SNAKE_CASE : Dict = num_channels
__SCREAMING_SNAKE_CASE : Optional[int] = is_training
__SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
__SCREAMING_SNAKE_CASE : Tuple = hidden_size
__SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : Any = intermediate_size
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
__SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : List[str] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE : Dict = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE : str = num_patches + 1
def __magic_name__( self :Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : List[Any] = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
return config, pixel_values
def __magic_name__( self :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE : str = FlaxViTModel(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE : Optional[int] = (self.image_size, self.image_size)
__SCREAMING_SNAKE_CASE : Optional[int] = (self.patch_size, self.patch_size)
__SCREAMING_SNAKE_CASE : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def __magic_name__( self :int , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE : List[Any] = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE : str = FlaxViTForImageClassification(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE : Any = 1
__SCREAMING_SNAKE_CASE : Dict = FlaxViTForImageClassification(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def __magic_name__( self :Any ) -> None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxViTModelTester(self )
__SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def __magic_name__( self :List[Any] ) -> str:
self.config_tester.run_common_tests()
def __magic_name__( self :Optional[int] ) -> str:
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Dict ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
def __magic_name__( self :int ) -> Optional[int]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :int ) -> Optional[int]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = model_class(lowerCAmelCase__ )
@jax.jit
def model_jitted(lowerCAmelCase__ :int , **lowerCAmelCase__ :Tuple ):
return model(pixel_values=lowerCAmelCase__ , **lowerCAmelCase__ )
with self.subTest('''JIT Enabled''' ):
__SCREAMING_SNAKE_CASE : Optional[int] = model_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __magic_name__( self :Optional[Any] ) -> Tuple:
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE : int = model_class_name.from_pretrained('''google/vit-base-patch16-224''' )
__SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowerCAmelCase__ )
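# Minimal standalone sketch (added) of the JIT-consistency check above: a jitted
# function should return the same values as its eager counterpart.
# import jax, jax.numpy as jnp
# f = jax.jit(lambda x: (x * 2).sum())
# x = jnp.ones((3,))
# assert jnp.allclose(f(x), (x * 2).sum())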
| 696 |
import os
import sys
import unittest
__lowerCAmelCase : List[Any] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowerCAmelCase : Optional[Any] =os.path.join(git_repo_path, 'src', 'transformers')
__lowerCAmelCase : Optional[Any] ='\n{0} = None\n'
__lowerCAmelCase : Tuple ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
__lowerCAmelCase : Dict ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tokenizers''' )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tensorflow_text''' )
__SCREAMING_SNAKE_CASE : Tuple = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers''' )
__SCREAMING_SNAKE_CASE : Any = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tensorflow_text''' )
__SCREAMING_SNAKE_CASE : List[str] = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers_and_vision''' )
def __magic_name__( self :List[str] ) -> Optional[int]:
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
        self.assertIn('''torch''' , objects )
        self.assertIn('''tensorflow_text''' , objects )
        self.assertIn('''sentencepiece_and_tokenizers''' , objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , '''\nCONSTANT = None\n''' )
__SCREAMING_SNAKE_CASE : List[str] = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
lowerCAmelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__SCREAMING_SNAKE_CASE : int = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , lowerCAmelCase__ )
| 696 | 1 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key ( ciphertext , key ):
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
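# Note (added): XOR is self-inverse, so applying the same key twice recovers the
# plaintext, e.g. (ord("a") ^ ord("k")) ^ ord("k") == ord("a").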
def filter_valid_chars ( ciphertext ):
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word ( possibles , common_word ):
    return [possible for possible in possibles if common_word in possible.lower()]
def _UpperCamelCase ( lowercase__ = "p059_cipher.txt" ):
__SCREAMING_SNAKE_CASE : list[int]
__SCREAMING_SNAKE_CASE : list[str]
__SCREAMING_SNAKE_CASE : str
__SCREAMING_SNAKE_CASE : str
__SCREAMING_SNAKE_CASE : str = Path(lowercase__ ).parent.joinpath(lowercase__ ).read_text(encoding='''utf-8''' )
__SCREAMING_SNAKE_CASE : Dict = [int(lowercase__ ) for number in data.strip().split(''',''' )]
__SCREAMING_SNAKE_CASE : List[str] = filter_valid_chars(lowercase__ )
for common_word in COMMON_WORDS:
__SCREAMING_SNAKE_CASE : Optional[int] = filter_common_word(lowercase__ , lowercase__ )
if len(lowercase__ ) == 1:
break
__SCREAMING_SNAKE_CASE : Dict = possibles[0]
return sum(ord(lowercase__ ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 |
import math
from numpy import inf
from scipy.integrate import quad
def _UpperCamelCase ( num ):
    if num <= 0:
        raise ValueError('''math domain error''' )
    return quad(integrand , 0 , inf , args=(num) )[0]
def integrand ( x , z ):
    return math.pow(x , z - 1 ) * math.exp(-x )
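# Worked example (added): the integral definition satisfies Gamma(n) = (n - 1)!
# for positive integers, so _UpperCamelCase(5) evaluates to approximately 24.0.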
if __name__ == "__main__":
from doctest import testmod
testmod()
| 696 | 1 |
import os
def largest_product ( grid ):
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def solution ():
    grid = []
    with open(os.path.dirname(__file__ ) + '''/grid.txt''' ) as file:
        for line in file:
            grid.append(line.strip('''\n''' ).split(''' ''' ) )
    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
if __name__ == "__main__":
print(solution())
| 696 |
def _UpperCamelCase ( principal , rate_per_annum , years_to_repay ):
    if principal <= 0:
        raise Exception('''Principal borrowed must be > 0''' )
    if rate_per_annum < 0:
        raise Exception('''Rate of interest must be >= 0''' )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception('''Years to repay must be an integer > 0''' )
    # Yearly rate is divided by 12 to get the monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get the number of payments, as payments are monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
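# Worked example (added): principal=25000, rate_per_annum=0.12, years_to_repay=3
# gives rate_per_month=0.01 and 36 payments, i.e. an EMI of roughly 830.36.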
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
from jiwer import compute_measures
import datasets
__lowerCAmelCase : str ='\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__lowerCAmelCase : Dict ='\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__lowerCAmelCase : Tuple ='\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
    def _info( self :Tuple ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def _compute( self :Tuple , predictions=None , references=None , concatenate_texts=False ) -> int:
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
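# Worked example (added), matching the docstring: the first pair contributes one
# substitution over 4 reference words, the second two substitutions plus one
# insertion over 4 reference words, so WER = (1 + 3) / (4 + 4) = 0.5.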
| 696 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCamelCase ( lowercase__ ):
    factors = prime_factors(lowercase__ )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
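# Worked examples (added): the function above returns 0 for 24 (2**3 * 3 is not
# square-free), -1 for 30 (three distinct prime factors) and 1 for 10 (two).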
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
from datetime import datetime
import requests
def download_video ( url ):
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    video_url = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url ).content
if __name__ == "__main__":
__lowerCAmelCase : int =input('Enter Video/IGTV url: ').strip()
__lowerCAmelCase : Union[str, Any] =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 696 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase : int ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __magic_name__( cls :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :List[str] ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :Dict ) -> Optional[int]:
CustomConfig.register_for_auto_class()
__SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> Dict:
        c = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + '''foo'''  # str
        c.update_from_string(
            f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
        self.assertEqual(n_embd , c.n_embd , '''mismatch for key: n_embd''' )
        self.assertEqual(resid_pdrop , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
        self.assertEqual(summary_type , c.summary_type , '''mismatch for key: summary_type''' )
def __magic_name__( self :Dict ) -> str:
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                '''The following keys are set with the default values in'''
                ''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
                f''' {', '.join(keys_with_defaults )}.''' )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
with self.assertRaises(lowerCAmelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
__SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
__SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __magic_name__( self :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json''']
__SCREAMING_SNAKE_CASE : Tuple = 768
configuration.save_pretrained(lowerCAmelCase__ )
shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) )
__SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def __magic_name__( self :List[str] ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
__SCREAMING_SNAKE_CASE : int = '''v4.0.0'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0'''
__SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(old_configuration.hidden_size , 768 )
| 696 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = '''timm_backbone'''
    def __init__( self :Optional[int] , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs , ) -> Union[str, Any]:
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
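# Hedged usage sketch (added), assuming this class mirrors the upstream
# TimmBackboneConfig: any timm architecture name can be plugged in, e.g.
# from transformers import TimmBackboneConfig
# config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))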
| 696 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Any ={
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
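# Usage note (added): with the `_LazyModule` indirection above, a plain
# `from transformers import LlamaConfig, LlamaForCausalLM` resolves lazily, so
# the torch-heavy modeling module is only imported on first access.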
| 696 | 1 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config ( config_path , display=False ):
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan ( device , conf_path=None , ckpt_path=None ):
    if conf_path is None:
        conf_path = '''./model_checkpoints/vqgan_only.yaml'''
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = '''./model_checkpoints/vqgan_only.pt'''
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd['''state_dict''']
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan ( x , model ):
    z , _ , _ = model.encode(x )
    print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str ( string , reload=False ):
    module , cls = string.rsplit('''.''' , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config ( config ):
    if "target" not in config:
        raise KeyError('''Expected key `target` to instantiate.''' )
    return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
def load_model_from_config ( config , sd , gpu=True , eval_mode=True ):
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model ( config , ckpt , gpu , eval_mode ):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt , map_location='''cpu''' )
        global_step = pl_sd['''global_step''']
        print(F'''loaded model from global step {global_step}.''' )
    else:
        pl_sd = {'''state_dict''': None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=gpu , eval_mode=eval_mode )['''model''']
    return model, global_step
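# Hedged usage sketch (added): the intended call pattern for the helpers above.
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model = load_vqgan(device)  # falls back to the default yaml/ckpt paths
# xrec = reconstruct_with_vqgan(x, model)  # x: a (B, 3, H, W) image batch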
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Optional[int] = d_kv
__SCREAMING_SNAKE_CASE : Tuple = d_ff
__SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers
        # This tells us how often (i.e. every how many encoder layers) we'll have to insert a sparse layer.
if self.num_sparse_encoder_layers > 0:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers
else:
__SCREAMING_SNAKE_CASE : Tuple = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers
__SCREAMING_SNAKE_CASE : List[Any] = num_heads
__SCREAMING_SNAKE_CASE : List[Any] = num_experts
__SCREAMING_SNAKE_CASE : Tuple = expert_capacity
__SCREAMING_SNAKE_CASE : List[Any] = router_bias
__SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
__SCREAMING_SNAKE_CASE : List[Any] = router_dtype
__SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens
__SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance
__SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate
__SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : int = initializer_factor
__SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj
__SCREAMING_SNAKE_CASE : Any = use_cache
__SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs
__SCREAMING_SNAKE_CASE : int = router_z_loss_coef
__SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef
__SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
__SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1]
__SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated'''
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new'''
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
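
def _example_sparse_steps():
    # Sketch (not in the original file) of the sparse-step arithmetic above:
    # with 12 encoder layers and 3 sparse layers, every 4th encoder block
    # becomes a sparse (mixture-of-experts) block.
    config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
    assert config.encoder_sparse_step == 12 // 3  # a sparse block every 4 layers
    return config.encoder_sparse_step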
# ---------------------------------------------------------------------------
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineSlowTests(unittest.TestCase):
def __magic_name__( self :Any ) -> str:
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )
__SCREAMING_SNAKE_CASE : List[str] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__SCREAMING_SNAKE_CASE : List[Any] = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : Any = jax.device_count()
__SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : Optional[int] = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
__SCREAMING_SNAKE_CASE : Union[str, Any] = replicate(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = shard(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1
__SCREAMING_SNAKE_CASE : str = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCAmelCase__ ) == num_samples
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )
__SCREAMING_SNAKE_CASE : Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : Tuple = 50
__SCREAMING_SNAKE_CASE : List[str] = jax.device_count()
__SCREAMING_SNAKE_CASE : List[str] = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : Optional[int] = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
__SCREAMING_SNAKE_CASE : Any = replicate(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = shard(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
def __magic_name__( self :Union[str, Any] ) -> Union[str, Any]:
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : Any = 50
__SCREAMING_SNAKE_CASE : Union[str, Any] = jax.device_count()
__SCREAMING_SNAKE_CASE : int = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : Dict = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
__SCREAMING_SNAKE_CASE : str = replicate(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = shard(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
def __magic_name__( self :int ) -> Union[str, Any]:
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )
__SCREAMING_SNAKE_CASE : int = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : List[Any] = 50
__SCREAMING_SNAKE_CASE : List[Any] = jax.device_count()
__SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : int = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
__SCREAMING_SNAKE_CASE : Optional[int] = replicate(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = shard(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
def __magic_name__( self :Dict ) -> int:
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state
__SCREAMING_SNAKE_CASE : Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__SCREAMING_SNAKE_CASE : List[Any] = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : List[str] = 50
__SCREAMING_SNAKE_CASE : str = jax.device_count()
__SCREAMING_SNAKE_CASE : List[str] = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : List[Any] = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
__SCREAMING_SNAKE_CASE : Tuple = replicate(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = shard(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
def __magic_name__( self :int ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = jax.device_count()
__SCREAMING_SNAKE_CASE : int = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : str = jax.random.split(jax.random.PRNGKey(0 ) , lowerCAmelCase__ )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )
__SCREAMING_SNAKE_CASE : List[Any] = replicate(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipeline.prepare_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = shard(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : List[str] = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )
__SCREAMING_SNAKE_CASE : Optional[int] = replicate(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = pipeline.prepare_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = shard(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
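
def _example_data_parallel_inference():
    # Sketch (not from the original test file) of the data-parallel recipe the
    # tests above repeat: replicate the weights across devices, split the RNG
    # so each device draws different noise, and shard the prompt batch so
    # `jit=True` can run the pipeline under jax.pmap. The prompt text is
    # illustrative only.
    pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
    )
    num_samples = jax.device_count()
    prompt_ids = pipeline.prepare_inputs(num_samples * ["a photo of an astronaut riding a horse"])

    params = replicate(params)  # one copy of the weights per device
    rng = jax.random.split(jax.random.PRNGKey(0), num_samples)  # independent noise per device
    prompt_ids = shard(prompt_ids)  # move the leading batch axis onto the device axis

    images = pipeline(prompt_ids, params, rng, 50, jit=True).images
    return images  # shape: (num_samples, 1, 512, 512, 3)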
# ---------------------------------------------------------------------------
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """
    Output class for text-to-video pipelines.

    Args:
        frames (`List[np.ndarray]` or `torch.FloatTensor`):
            Denoised frames of the video, as a list of NumPy arrays or a torch tensor.
    """

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
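
# Note (added sketch, not in the original file): the try/except guard above is
# diffusers' optional-dependency pattern. Importing the pipelines always
# succeeds; when torch or transformers is missing, the wildcard import pulls in
# dummy placeholder classes that raise a descriptive error only on use, e.g.:
#
#   from diffusers import TextToVideoSDPipeline   # works either way
#   TextToVideoSDPipeline.from_pretrained(...)    # raises if torch is absent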
# ---------------------------------------------------------------------------
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
# ---------------------------------------------------------------------------
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
# ---------------------------------------------------------------------------
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
def __init__( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str=100 , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :List[str]=30 , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :int=3 , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :str=32 , lowerCAmelCase__ :str=5 , lowerCAmelCase__ :Optional[int]=4 , lowerCAmelCase__ :Optional[Any]=37 , lowerCAmelCase__ :List[str]="gelu" , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[Any]=10 , lowerCAmelCase__ :int=0.02 , lowerCAmelCase__ :Union[str, Any]=3 , ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[Any] = parent
__SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[str] = image_size
__SCREAMING_SNAKE_CASE : Tuple = patch_size
__SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
__SCREAMING_SNAKE_CASE : Optional[int] = is_training
__SCREAMING_SNAKE_CASE : int = use_labels
__SCREAMING_SNAKE_CASE : Tuple = hidden_size
__SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : int = hidden_act
__SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : str = type_sequence_label_size
__SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE : List[Any] = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE : List[Any] = num_patches + 1
def __magic_name__( self :str ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : str = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def __magic_name__( self :List[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :int ) -> str:
__SCREAMING_SNAKE_CASE : List[str] = FlaxBeitModel(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__( self :Dict , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : str = FlaxBeitForMaskedImageModeling(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __magic_name__( self :int , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE : Tuple = FlaxBeitForImageClassification(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE : Tuple = 1
__SCREAMING_SNAKE_CASE : Dict = FlaxBeitForImageClassification(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
def __magic_name__( self :Optional[Any] ) -> Tuple:
self.config_tester.run_common_tests()
def __magic_name__( self :List[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Dict = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : List[Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = model_class(lowerCAmelCase__ )
@jax.jit
def model_jitted(lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Tuple ):
return model(pixel_values=lowerCAmelCase__ , **lowerCAmelCase__ )
with self.subTest('''JIT Enabled''' ):
__SCREAMING_SNAKE_CASE : List[str] = model_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE : int = model_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def __magic_name__( self :Optional[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Any ) -> List[str]:
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__ )
def __magic_name__( self :Dict ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :Tuple ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowerCAmelCase__ )
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
@slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
@slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
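
def _example_beit_classification():
    # Sketch (not from the original test file) of the bare inference path the
    # integration tests above exercise: preprocess an image, run the Flax
    # model, and take the argmax over the ImageNet logits.
    image_processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

    inputs = image_processor(images=prepare_img(), return_tensors="np")
    logits = model(**inputs).logits  # shape: (1, 1000)
    return int(logits.argmax(-1)[0])  # 281 ("tabby cat") for the COCO cats image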
# ---------------------------------------------------------------------------
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
        __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPix2PixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = '''french fries'''
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
        __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPix2PixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2
        __SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.float32 ) / 255.0
__SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5
__SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
        __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPix2PixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(lowerCAmelCase__ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
        __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPix2PixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae''']
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
__SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
def __magic_name__( self :Dict ) -> Optional[int]:
        __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Any ) -> Union[str, Any]:
        __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = self.get_inputs()
__SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Optional[int] ) -> Optional[int]:
        __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : str = self.get_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = 0
def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None:
__SCREAMING_SNAKE_CASE : Dict = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__SCREAMING_SNAKE_CASE : List[str] = False
        __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.float16 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __magic_name__( self :List[str] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        __SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.float16 )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __magic_name__( self :int ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) )
__SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix'''
        __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPix2PixPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = output.images[0]
__SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
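
def _example_instruct_pix2pix_edit():
    # Sketch (not from the original test file) of the basic editing loop the
    # slow tests above check. Two guidance knobs matter: `guidance_scale`
    # weights the text instruction, `image_guidance_scale` weights faithfulness
    # to the input image.
    pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
        "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
    ).to("cuda")
    image = load_image(
        "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
    )
    edited = pipe(
        "turn him into a cyborg",
        image=image,
        num_inference_steps=20,
        guidance_scale=7.5,
        image_guidance_scale=1.0,
    ).images[0]
    return edited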
# ---------------------------------------------------------------------------
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
return inputs_dict
class TFMobileBertModelTester:
def __init__( self :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any]=13 , lowerCAmelCase__ :int=7 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=99 , lowerCAmelCase__ :Dict=32 , lowerCAmelCase__ :Tuple=32 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :List[Any]=37 , lowerCAmelCase__ :Optional[int]="gelu" , lowerCAmelCase__ :Any=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :int=0.02 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :Tuple=4 , lowerCAmelCase__ :int=None , ) -> List[str]:
__SCREAMING_SNAKE_CASE : Tuple = parent
__SCREAMING_SNAKE_CASE : Tuple = batch_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
__SCREAMING_SNAKE_CASE : List[str] = is_training
__SCREAMING_SNAKE_CASE : Dict = use_input_mask
__SCREAMING_SNAKE_CASE : List[Any] = use_token_type_ids
__SCREAMING_SNAKE_CASE : Tuple = use_labels
__SCREAMING_SNAKE_CASE : int = vocab_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_size
__SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
__SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
__SCREAMING_SNAKE_CASE : int = hidden_act
__SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
__SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
__SCREAMING_SNAKE_CASE : List[str] = num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = num_choices
__SCREAMING_SNAKE_CASE : Union[str, Any] = scope
__SCREAMING_SNAKE_CASE : Optional[Any] = embedding_size
def __magic_name__( self :List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : int = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : int = None
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : int = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple ) -> int:
__SCREAMING_SNAKE_CASE : Dict = TFMobileBertModel(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = [input_ids, input_mask]
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = TFMobileBertForMaskedLM(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = TFMobileBertForNextSentencePrediction(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__( self :str , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Tuple ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = TFMobileBertForPreTraining(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[int] = TFMobileBertForSequenceClassification(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Dict = self.num_choices
__SCREAMING_SNAKE_CASE : str = TFMobileBertForMultipleChoice(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE : Any = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE : int = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE : List[str] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__( self :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str ) -> int:
__SCREAMING_SNAKE_CASE : str = self.num_labels
__SCREAMING_SNAKE_CASE : str = TFMobileBertForTokenClassification(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__( self :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict ) -> int:
__SCREAMING_SNAKE_CASE : Union[str, Any] = TFMobileBertForQuestionAnswering(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
def __magic_name__( self :Dict ) -> Tuple:
self.config_tester.run_common_tests()
def __magic_name__( self :Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCAmelCase__ )
def __magic_name__( self :Optional[int] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> Any:
__SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCAmelCase__ )
def __magic_name__( self :Optional[int] ) -> str:
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__( self :Tuple ) -> Optional[Any]:
        model = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 30_522]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-4.591_9547, -9.24_8295, -9.64_5256],
                    [-6.730_6175, -6.44_0284, -6.605_2837],
                    [-7.274_3506, -6.784_7915, -6.02_4673],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 696 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput( BaseOutput ):
    '''Output class for the scheduler's `step` function.'''
    prev_sample : torch.FloatTensor
    pred_original_sample : Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class DDIMInverseScheduler( SchedulerMixin , ConfigMixin ):
    '''Inverse of DDIM: deterministically maps a clean sample forward to a noisier one.'''
    order = 1
@register_to_config
    def __init__( self , num_train_timesteps : int = 1_000 , beta_start : float = 0.0001 , beta_end : float = 0.02 , beta_schedule : str = "linear" , trained_betas : Optional[Union[np.ndarray, List[float]]] = None , clip_sample : bool = True , set_alpha_to_zero : bool = True , steps_offset : int = 0 , prediction_type : str = "epsilon" , clip_sample_range : float = 1.0 , **kwargs , ) -> None:
        if kwargs.get('''set_alpha_to_one''' , None ) is not None:
            deprecation_message = (
                '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
            )
            deprecate('''set_alpha_to_one''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            set_alpha_to_zero = kwargs['''set_alpha_to_one''']
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # At every step in inverted ddim, we are looking into the next alphas_cumprod.
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds.
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # (in this case, self.step() just outputs the predicted noise)
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps ).copy().astype(np.int64 ) )
    def scale_model_input( self , sample : torch.FloatTensor , timestep : Optional[int] = None ) -> torch.FloatTensor:
        return sample
    def set_timesteps( self , num_inference_steps : int , device : Union[str, torch.device] = None ) -> None:
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                f''' maximal {self.config.num_train_timesteps} timesteps.''' )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_steps is a power of 3
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round().copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        self.timesteps += self.config.steps_offset
    def step( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , eta : float = 0.0 , use_clipped_model_output : bool = False , variance_noise : Optional[torch.FloatTensor] = None , return_dict : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
        # `eta`, `use_clipped_model_output` and `variance_noise` are unused here;
        # they are kept for API compatibility with DDIMScheduler.step.
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise, also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                ''' `v_prediction`''' )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
    def __len__( self ) -> int:
        return self.config.num_train_timesteps
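# --- Illustrative check (added; a minimal hedged sketch, not part of the
# original module). It exercises the cosine "squaredcos_cap_v2" schedule
# above: every beta should stay in (0, max_beta] and the cumulative alpha
# product should decrease monotonically, which is what `step()` relies on
# when it indexes `alphas_cumprod`. The function name is illustrative.
def _demo_cosine_schedule(num_diffusion_timesteps=10 ):
    betas = betas_for_alpha_bar(num_diffusion_timesteps )
    alphas_cumprod = torch.cumprod(1.0 - betas , dim=0 )
    assert torch.all(betas > 0 ) and torch.all(betas <= 0.999 )
    assert torch.all(alphas_cumprod[1:] <= alphas_cumprod[:-1] )
    return betas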
| 696 | 1 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter( logging.LoggerAdapter ):
    '''A LoggerAdapter that can restrict records to the main process or emit them rank by rank.'''
    @staticmethod
    def _should_log(main_process_only ):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log( self , level , msg , *args , **kwargs ):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
        main_process_only = kwargs.pop('''main_process_only''' , True )
        in_order = kwargs.pop('''in_order''' , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def get_logger( name , log_level=None ):
    if log_level is None:
        log_level = os.environ.get('''ACCELERATE_LOG_LEVEL''' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
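# --- Usage sketch (added; hedged, comment-only because this module uses a
# relative import and assumes a running Accelerate state). The module name
# and level below are illustrative:
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()          # initializes the shared state
#   logger = get_logger(__name__ , log_level='''INFO''' )
#   logger.info('''printed once''' , main_process_only=True )
#   logger.debug('''printed rank by rank''' , main_process_only=False , in_order=True )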
| 696 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class Summarization( TaskTemplate ):
    '''simple docstring'''
    task : str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema : ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
    text_column : str = "text"
    summary_column : str = "summary"
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
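# --- Usage sketch (added; hedged, comment-only because this module uses
# relative imports). Column names are illustrative, CNN/DailyMail-style;
# `column_mapping` is what `Dataset.prepare_for_task` uses to rename columns
# to the canonical "text"/"summary" pair:
#
#   template = Summarization(text_column='''document''' , summary_column='''highlights''' )
#   template.column_mapping  # -> {'document': 'text', 'highlights': 'summary'}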
| 696 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''yolos'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act='''gelu''' , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1E-4
    @property
    def default_onnx_opset( self ) -> int:
        return 12
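# --- Usage sketch (added; hedged, comment-only because this module uses
# relative imports). The defaults above mirror hustvl/yolos-small; any field
# can be overridden by keyword and a randomly initialized model built from
# the result:
#
#   from transformers import YolosConfig, YolosForObjectDetection
#   config = YolosConfig(num_detection_tokens=100 , image_size=[512, 864] )
#   model = YolosForObjectDetection(config )  # randomly initialized weights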
| 696 |
def solution( max_perimeter = 10**9 ):
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
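# --- Sanity check (added; a hedged sketch, not part of the original file).
# This is Project Euler 94 ("almost equilateral triangles"): triangles with
# sides (a, a, a +/- 1) and integral area. Their side lengths follow a
# Pell-like recurrence, which the loop above walks directly instead of
# testing every perimeter. The first two qualifying triangles have
# perimeters 5+5+6 = 16 and 17+17+16 = 50, so:
def _demo_small_limit():
    assert solution(100 ) == 16 + 50
    return solution(100 )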
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowercase :
'''simple docstring'''
@staticmethod
def __magic_name__( *lowerCAmelCase__ :Dict , **lowerCAmelCase__ :Optional[Any] ) -> Any:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __magic_name__( self :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int ) -> str:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ObjectDetectionPipeline(model=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __magic_name__( self :List[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> Any:
__SCREAMING_SNAKE_CASE : str = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )
self.assertGreater(len(lowerCAmelCase__ ) , 0 )
for detected_object in outputs:
self.assertEqual(
lowerCAmelCase__ , {
'''score''': ANY(lowerCAmelCase__ ),
'''label''': ANY(lowerCAmelCase__ ),
'''box''': {'''xmin''': ANY(lowerCAmelCase__ ), '''ymin''': ANY(lowerCAmelCase__ ), '''xmax''': ANY(lowerCAmelCase__ ), '''ymax''': ANY(lowerCAmelCase__ )},
} , )
import datasets
__SCREAMING_SNAKE_CASE : Tuple = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = [
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
]
__SCREAMING_SNAKE_CASE : List[str] = object_detector(lowerCAmelCase__ , threshold=0.0 )
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for outputs in batch_outputs:
self.assertGreater(len(lowerCAmelCase__ ) , 0 )
for detected_object in outputs:
self.assertEqual(
lowerCAmelCase__ , {
'''score''': ANY(lowerCAmelCase__ ),
'''label''': ANY(lowerCAmelCase__ ),
'''box''': {'''xmin''': ANY(lowerCAmelCase__ ), '''ymin''': ANY(lowerCAmelCase__ ), '''xmax''': ANY(lowerCAmelCase__ ), '''ymax''': ANY(lowerCAmelCase__ )},
} , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def __magic_name__( self :Optional[int] ) -> str:
pass
@require_torch
def __magic_name__( self :List[str] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = '''hf-internal-testing/tiny-detr-mobilenetsv3'''
__SCREAMING_SNAKE_CASE : Tuple = AutoModelForObjectDetection.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = ObjectDetectionPipeline(model=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
] , )
__SCREAMING_SNAKE_CASE : str = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
] , )
@require_torch
@slow
def __magic_name__( self :List[Any] ) -> int:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''facebook/detr-resnet-50'''
__SCREAMING_SNAKE_CASE : str = AutoModelForObjectDetection.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = ObjectDetectionPipeline(model=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
__SCREAMING_SNAKE_CASE : List[Any] = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
@require_torch
@slow
def __magic_name__( self :List[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''facebook/detr-resnet-50'''
__SCREAMING_SNAKE_CASE : List[str] = pipeline('''object-detection''' , model=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
__SCREAMING_SNAKE_CASE : Optional[int] = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
@require_torch
@slow
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : str = 0.9985
__SCREAMING_SNAKE_CASE : List[str] = '''facebook/detr-resnet-50'''
__SCREAMING_SNAKE_CASE : Dict = pipeline('''object-detection''' , model=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=lowerCAmelCase__ )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __magic_name__( self :Tuple ) -> Any:
__SCREAMING_SNAKE_CASE : Dict = '''Narsil/layoutlmv3-finetuned-funsd'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0.9993
__SCREAMING_SNAKE_CASE : Tuple = pipeline('''object-detection''' , model=lowerCAmelCase__ , threshold=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = object_detector(
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
] , )
| 696 |
def abbr( a , b ):
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
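# --- Examples (added; a hedged sketch, not part of the original file).
# The DP answers the classic "abbreviation" question: can `a` be turned into
# `b` by upper-casing some of its lowercase letters and deleting the rest?
def _demo_abbr():
    assert abbr('''daBcd''' , '''ABC''' )      # capitalize 'a' and 'c', drop the 'd's
    assert not abbr('''dBcd''' , '''ABC''' )   # no 'a' available to produce 'A'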
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
def binomial_coefficient( n , k ):
    result = 1  # keeps the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result
def catalan_number( node_count ):
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)
def factorial( n ):
    if n < 0:
        raise ValueError('''factorial() not defined for negative values''' )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result
def binary_tree_count( node_count ):
    return catalan_number(node_count ) * factorial(node_count )
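# --- Sanity check (added; a hedged sketch, not part of the original file).
# For 3 nodes: C(3) = C(6,3) / 4 = 5 binary search trees, and 5 * 3! = 30
# binary trees, matching the closed forms used above.
def _demo_counts():
    assert catalan_number(3 ) == 5
    assert binary_tree_count(3 ) == 30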
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] =int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
f"""binary trees and {catalan_number(node_count)} binary search trees."""
)
| 696 |
from scipy.stats import pearsonr
import datasets
__lowerCAmelCase : str ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__lowerCAmelCase : Tuple ='\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
__lowerCAmelCase : Optional[int] ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr( datasets.Metric ):
    '''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 696 | 1 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self :Any , lowerCAmelCase__ :Dict[str, int] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int = None , lowerCAmelCase__ :int = None ) -> Any:
super().__init__()
__SCREAMING_SNAKE_CASE : str = pad_token_id
__SCREAMING_SNAKE_CASE : Optional[int] = max_length
__SCREAMING_SNAKE_CASE : Optional[int] = vocab
__SCREAMING_SNAKE_CASE : Any = merges
__SCREAMING_SNAKE_CASE : Dict = BytePairTokenizer(lowerCAmelCase__ , lowerCAmelCase__ , sequence_length=lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :str , lowerCAmelCase__ :GPTaTokenizer , *lowerCAmelCase__ :Optional[int] , **lowerCAmelCase__ :Optional[int] ) -> int:
__SCREAMING_SNAKE_CASE : str = [''' '''.join(lowerCAmelCase__ ) for m in tokenizer.bpe_ranks.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.get_vocab()
return cls(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :List[str] , lowerCAmelCase__ :Union[str, os.PathLike] , *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Any = GPTaTokenizer.from_pretrained(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
return cls.from_tokenizer(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :Optional[Any] , lowerCAmelCase__ :Tuple ) -> Union[str, Any]:
return cls(**lowerCAmelCase__ )
def __magic_name__( self :int ) -> List[Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def __magic_name__( self :int , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :int = None ) -> Tuple:
__SCREAMING_SNAKE_CASE : Tuple = self.tf_tokenizer(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = tf.ones_like(lowerCAmelCase__ )
if self.pad_token_id is not None:
# pad the tokens up to max length
__SCREAMING_SNAKE_CASE : Optional[Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = pad_model_inputs(
lowerCAmelCase__ , max_seq_length=lowerCAmelCase__ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 696 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
    'tokenizer_file': {
        'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mobilebert-uncased': 5_1_2}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token='''[UNK]''' , sep_token='''[SEP]''' , pad_token='''[PAD]''' , cls_token='''[CLS]''' , mask_token='''[MASK]''' , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
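# --- Usage sketch (added; hedged, comment-only because this module uses
# relative imports). Encoding a sentence pair produces the
# [CLS] A [SEP] B [SEP] layout whose token-type ids the helpers above
# compute (segment B gets type id 1); the input strings are illustrative:
#
#   from transformers import MobileBertTokenizerFast
#   tok = MobileBertTokenizerFast.from_pretrained('''google/mobilebert-uncased''' )
#   enc = tok('''how are you?''' , '''fine, thanks''' )
#   enc['''input_ids'''] , enc['''token_type_ids''']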
| 696 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput( BaseOutput ):
    '''simple docstring'''
    frames : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 696 |
import os
def largest_product( grid ):
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    grid = []
    with open(os.path.dirname(__file__ ) + '''/grid.txt''' ) as file:
        for line in file:
            grid.append(line.strip('''\n''' ).split(''' ''' ) )
    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
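# --- Sanity check (added; a hedged sketch, not part of the original file).
# A tiny 4x4 grid where the best run of four adjacent numbers is the bottom
# row, 4 * 5 * 6 * 7 = 840:
def _demo_largest_product():
    demo = [
        [1, 1, 1, 1],
        [1, 2, 1, 1],
        [1, 1, 3, 1],
        [4, 5, 6, 7],
    ]
    assert largest_product(demo ) == 840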
if __name__ == "__main__":
print(solution())
| 696 | 1 |
from collections.abc import Iterable
from typing import Any
class _lowercase :
'''simple docstring'''
def __init__( self :Any , lowerCAmelCase__ :int | None = None ) -> str:
__SCREAMING_SNAKE_CASE : List[Any] = value
__SCREAMING_SNAKE_CASE : Node | None = None # Added in order to delete a node easier
__SCREAMING_SNAKE_CASE : Node | None = None
__SCREAMING_SNAKE_CASE : Node | None = None
def __repr__( self :Union[str, Any] ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} , indent=1 )
class _lowercase :
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :Node | None = None ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Tuple = root
def __str__( self :Optional[int] ) -> str:
return str(self.root )
def __magic_name__( self :Dict , lowerCAmelCase__ :Node , lowerCAmelCase__ :Node | None ) -> None:
if new_children is not None: # reset its kids
__SCREAMING_SNAKE_CASE : int = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowerCAmelCase__ ): # If it is the right children
__SCREAMING_SNAKE_CASE : List[str] = new_children
else:
__SCREAMING_SNAKE_CASE : List[Any] = new_children
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = new_children
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Node ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __magic_name__( self :Optional[int] ) -> bool:
return self.root is None
def __magic_name__( self :Any , lowerCAmelCase__ :Tuple ) -> None:
__SCREAMING_SNAKE_CASE : List[str] = Node(lowerCAmelCase__ ) # create a new Node
if self.empty(): # if Tree is empty
__SCREAMING_SNAKE_CASE : int = new_node # set its root
else: # Tree is not empty
__SCREAMING_SNAKE_CASE : List[Any] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = new_node # We insert the new node in a leaf
break
else:
__SCREAMING_SNAKE_CASE : Optional[int] = parent_node.left
else:
if parent_node.right is None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = new_node
break
else:
__SCREAMING_SNAKE_CASE : Tuple = parent_node.right
__SCREAMING_SNAKE_CASE : Optional[Any] = parent_node
def __magic_name__( self :List[Any] , *lowerCAmelCase__ :Any ) -> None:
for value in values:
self.__insert(lowerCAmelCase__ )
def __magic_name__( self :List[str] , lowerCAmelCase__ :Optional[int] ) -> Node | None:
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''' )
else:
__SCREAMING_SNAKE_CASE : Any = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
__SCREAMING_SNAKE_CASE : List[Any] = node.left if value < node.value else node.right
return node
def __magic_name__( self :Dict , lowerCAmelCase__ :Node | None = None ) -> Node | None:
if node is None:
if self.root is None:
return None
__SCREAMING_SNAKE_CASE : List[str] = self.root
if not self.empty():
while node.right is not None:
__SCREAMING_SNAKE_CASE : Any = node.right
return node
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Node | None = None ) -> Node | None:
if node is None:
__SCREAMING_SNAKE_CASE : Dict = self.root
if self.root is None:
return None
if not self.empty():
__SCREAMING_SNAKE_CASE : int = self.root
while node.left is not None:
__SCREAMING_SNAKE_CASE : int = node.left
return node
def __magic_name__( self :int , lowerCAmelCase__ :int ) -> None:
__SCREAMING_SNAKE_CASE : int = self.search(lowerCAmelCase__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCAmelCase__ , lowerCAmelCase__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCAmelCase__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCAmelCase__ , node.left )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
__SCREAMING_SNAKE_CASE : Optional[int] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __magic_name__( self :Any , lowerCAmelCase__ :Node | None ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __magic_name__( self :int , lowerCAmelCase__ :str=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :list , lowerCAmelCase__ :Node | None ) -> None:
if node:
self.inorder(lowerCAmelCase__ , node.left )
arr.append(node.value )
self.inorder(lowerCAmelCase__ , node.right )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Node ) -> int:
__SCREAMING_SNAKE_CASE : list[int] = []
self.inorder(lowerCAmelCase__ , lowerCAmelCase__ ) # append all values to list using inorder traversal
return arr[k - 1]
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
if curr_node is not None:
__SCREAMING_SNAKE_CASE : int = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : List[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
__SCREAMING_SNAKE_CASE : List[str] = BinarySearchTree()
for i in testlist:
t.insert(lowercase__ )
# Prints all the elements of the list in order traversal
print(lowercase__ )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(lowercase__ )
print(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 696 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) )
class _lowercase :
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier
__SCREAMING_SNAKE_CASE : Dict = min_depth
__SCREAMING_SNAKE_CASE : List[str] = tf_padding
__SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier )
__SCREAMING_SNAKE_CASE : List[str] = output_stride
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = scope
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __magic_name__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __magic_name__( self :List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __magic_name__( self :Any ) -> Dict:
pass
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : Optional[int] = 26
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :List[str] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
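# Minimal standalone sketch (added for illustration; not part of the original
# test file): classifying one image with the pretrained MobileNetV1 checkpoint
# used by the integration test above. `image_path` is a hypothetical input.
def _demo_mobilenet_v1_inference(image_path: str) -> str:
    import torch
    from PIL import Image
    from transformers import MobileNetV1ForImageClassification, MobileNetV1ImageProcessor
    processor = MobileNetV1ImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''')
    model = MobileNetV1ForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''')
    inputs = processor(images=Image.open(image_path), return_tensors='''pt''')
    with torch.no_grad():
        logits = model(**inputs).logits
    # map the top logit to its ImageNet label
    return model.config.id2label[logits.argmax(-1).item()]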
| 696 | 1 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
'transformers',
os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint))
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''')
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
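    # Illustrative addition (not in the original script): the regex pairs a
    # checkpoint name with its Hub link, and the equality below is exactly the
    # property the checker verifies for every config docstring.
    _sample = '''[bert-base-uncased](https://huggingface.co/bert-base-uncased)'''
    for _name, _link in _re_checkpoint.findall(_sample):
        assert _link == F'''https://huggingface.co/{_name}'''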
| 696 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''])
    repo = g.get_repo('''huggingface/diffusers''')
    open_issues = repo.get_issues(state='''open''')
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
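# Small helper added for illustration (not in the original script): the Stalebot
# notification window above reduces to plain day arithmetic on UTC timestamps.
# The arguments are hypothetical stand-ins for `issue.updated_at` / `issue.created_at`.
def _needs_stale_notification(updated_at, created_at, now=None):
    now = now or dt.utcnow()
    return (now - updated_at).days > 23 and (now - created_at).days >= 30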
if __name__ == "__main__":
main()
| 696 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
'''simple docstring'''
    def __init__(self, load_in_8bit=False, load_in_4bit=False, llm_int8_threshold=6.0, llm_int8_skip_modules=None, llm_int8_enable_fp32_cpu_offload=False, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=None, bnb_4bit_quant_type="fp4", bnb_4bit_use_double_quant=False, **kwargs):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''')
        self.post_init()
    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError('''llm_int8_threshold must be a float''')
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError('''llm_int8_skip_modules must be a list of strings''')
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''')
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError('''llm_int8_has_fp16_weight must be a boolean''')
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''')
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError('''bnb_4bit_quant_type must be a string''')
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError('''bnb_4bit_use_double_quant must be a boolean''')
        if self.load_in_4bit and not version.parse(importlib.metadata.version('''bitsandbytes''')) >= version.parse(
                '''0.39.0'''):
            raise ValueError(
                '''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''')
    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
@classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, '''w''', encoding='''utf-8''') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '''\n'''
            writer.write(json_string)
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['''bnb_4bit_compute_dtype'''] = str(output['''bnb_4bit_compute_dtype''']).split('''.''')[1]
        return output
def __repr__( self :Optional[int] ) -> Tuple:
return f'''{self.__class__.__name__} {self.to_json_string()}'''
    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
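def _demo_bitsandbytes_config() -> str:
    # Usage sketch added for illustration (not part of the original module):
    # build a 4-bit config and serialize only the non-default fields. Running it
    # requires torch and bitsandbytes>=0.39.0, as the validation above enforces.
    config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type='''nf4''', bnb_4bit_compute_dtype='''bfloat16''')
    assert config.quantization_method() == '''nf4'''
    return config.to_json_string()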
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''canine'''
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16_384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xe000, eos_token_id=0xe001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16_384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
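def _demo_canine_config() -> "CanineConfig":
    # Usage sketch added for illustration: CANINE operates directly on Unicode
    # code points hashed into buckets, which is why the signature above has
    # hashing parameters instead of a `vocab_size`. The override is hypothetical.
    config = CanineConfig(num_hash_buckets=8_192)
    assert config.model_type == '''canine'''
    return config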
| 696 | 1 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'''add_prefix_space''': True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = '''lower newer'''
        bpe_tokens = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = '''lower newer'''
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='''max_length''')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='''max_length''')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='''max_length''',)
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='''max_length''')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='''max_length''')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='''max_length''',)
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token='''<pad>''')
        # Simple input
        s = '''This is a simple input'''
        s2 = ['''This is a simple input looooooooong''', '''This is a simple input''']
        p = ('''This is a simple input''', '''This is a pair''')
        p2 = [
            ('''This is a simple input loooooong''', '''This is a simple input'''),
            ('''This is a simple pair loooooong''', '''This is a simple pair'''),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding='''max_length''', max_length=30, return_tensors='''np''')
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors='''np''')
        out_p = tokenizer(*p, padding='''max_length''', max_length=60, return_tensors='''np''')
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors='''np''')
        # s
        # test single string max_length padding
        self.assertEqual(out_s['''input_ids'''].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s['''input_ids'''])
        self.assertTrue(0 in out_s['''attention_mask'''])
        # s2
        # test automatic padding
        self.assertEqual(out_s2['''input_ids'''].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2['''input_ids'''][0])
        self.assertFalse(0 in out_s2['''attention_mask'''][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2['''input_ids'''][1])
        self.assertTrue(0 in out_s2['''attention_mask'''][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['''input_ids'''].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p['''input_ids'''])
        self.assertTrue(0 in out_p['''attention_mask'''])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2['''input_ids'''].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2['''input_ids'''][0])
        self.assertFalse(0 in out_p2['''attention_mask'''][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2['''input_ids'''][1])
        self.assertTrue(0 in out_p2['''attention_mask'''][1])
    def test_add_bos_token_slow(self):
        bos_token = '''$$$'''
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = '''This is a simple input'''
        s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
@slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''')
        text = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
        expected_truncated_text = '''\nif len_a > len_b: result = a\nelse: result = b'''
        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ['''^#''', re.escape('''<|endoftext|>'''), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
    def test_padding_different_model_input_name(self):
        pass
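def _demo_truncate_before_pattern() -> str:
    # Usage sketch added for illustration (not part of the test suite): the
    # `truncate_before_pattern` decode option exercised above cuts the decoded
    # text at the first matching pattern, here a run of three newlines.
    tokenizer = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''')
    ids = tokenizer.encode('''print("hello")\n\n\n\n# trailing comment''')
    return tokenizer.decode(ids, truncate_before_pattern=['''\n\n\n'''])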
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class TransfoXLConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__(self, vocab_size=267_735, cutoffs=[20_000, 40_000, 200_000], d_model=1_024, d_embed=1_024, n_head=16, d_head=64, d_inner=4_096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1_600, clamp_len=1_000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
@property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
        return -1
@max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
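def _demo_transfo_xl_config() -> int:
    # Usage sketch added for illustration: the attribute_map above lets generic
    # names resolve to Transformer-XL's own hyperparameters; the override below
    # is hypothetical.
    config = TransfoXLConfig(n_layer=6)
    assert config.num_hidden_layers == 6  # mapped to n_layer via attribute_map
    return config.max_position_embeddings  # always -1: no fixed length limit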
| 696 | 1 |
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            F'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'''
            F'''Valid values are: {', '.join(speed_chart_inverse)}'''
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
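    # Illustrative conversions added for clarity (100 km/h in the other units):
    print(convert_speed(100, '''km/h''', '''m/s'''))   # 27.778
    print(convert_speed(100, '''km/h''', '''mph'''))   # 62.137
    print(convert_speed(100, '''km/h''', '''knot'''))  # 53.996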
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''megatron-bert'''
    def __init__(self, vocab_size=29_056, hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4_096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
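def _demo_megatron_bert_config() -> "MegatronBertConfig":
    # Usage sketch added for illustration: Megatron-BERT keeps BERT's
    # hyperparameter set with larger defaults (24 layers, hidden size 1024).
    # The small override below is hypothetical.
    config = MegatronBertConfig(num_hidden_layers=2)
    assert config.hidden_size == 1_024
    return config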
| 696 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = 'https://openaipublic.azureedge.net/jukebox/models/'
MODEL_MAPPING = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith('''.model.1.bias''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.1.bias''', '''.conv1d_1.bias''')
    elif key.endswith('''.model.1.weight''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.1.weight''', '''.conv1d_1.weight''')
    elif key.endswith('''.model.3.bias''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.3.bias''', '''.conv1d_2.bias''')
    elif key.endswith('''.model.3.weight''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.3.weight''', '''.conv1d_2.weight''')
    if "conditioner_blocks.0." in key:
        key = key.replace('''conditioner_blocks.0''', '''conditioner_blocks''')
    if "prime_prior" in key:
        key = key.replace('''prime_prior''', '''encoder''')
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('''.emb.''', '''.''')
    if key.endswith('''k'''):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('''.k''', '''.codebook''')
    if "y_emb." in key:
        return key.replace('''y_emb.''', '''metadata_embedding.''')
    if "x_emb.emb." in key:
        key = key.replace('''0.x_emb.emb''', '''embed_tokens''')
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''')
    re_encoder_block_resnet = re.compile(
        R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_encoder_block_proj_out = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''')
    re_decoder_block_conv_out = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''')
    re_decoder_block_resnet = re.compile(
        R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_decoder_block_proj_in = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''')
    re_prior_cond_conv_out = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''')
    re_prior_cond_resnet = re.compile(
        R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_prior_cond_proj_in = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''')
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
            resnet_block = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
            resnet_block = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
            resnet_block = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key
        key = replace_key(key)
        if F'''{key_prefix}.{key}''' not in model_state_dict or key is None:
            print(F'''failed converting {original_key} to {key}, does not match''')
        # handle missmatched shape
        elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape:
            val = model_state_dict[F'''{key_prefix}.{key}''']
            print(F'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''')
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}'''):
            r = requests.get(F'''{PREFIX}{file}''', allow_redirects=True)
            os.makedirs(F'''{pytorch_dump_folder_path}/''', exist_ok=True)
            open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''', '''wb''').write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split('''/''')[-1]]
    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''')['''model''']
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('''.b'''):
                new_dic[k.replace('''b''', '''bias''')] = old_dic[k]
            elif k.endswith('''.w'''):
                new_dic[k.replace('''w''', '''weight''')] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('''.blocks.''', '''.model_blocks.''')] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = '''vqvae''' if i == 0 else F'''priors.{3 - i}'''
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)
    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(F'''{pytorch_dump_folder_path}/mapping.json''', '''w''') as txtfile:
        json.dump(mapping, txtfile)
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    return weight_dict
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
__lowerCAmelCase : List[str] =parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
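    # Example invocation (added for illustration; the script file name is an
    # assumption, the argument values mirror the parser defaults above):
    #   python convert_jukebox.py --model_name jukebox-1b-lyrics \
    #       --pytorch_dump_folder_path jukebox-1b-lyrics-converted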
| 696 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
class CheckDummiesTester(unittest.TestCase):
'''simple docstring'''
    def test_find_backend(self):
        no_backend = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
        self.assertIsNone(no_backend)
        simple_backend = find_backend(''' if not is_tokenizers_available():''')
        self.assertEqual(simple_backend, '''tokenizers''')
        backend_with_underscore = find_backend(''' if not is_tensorflow_text_available():''')
        self.assertEqual(backend_with_underscore, '''tensorflow_text''')
        double_backend = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
        self.assertEqual(double_backend, '''sentencepiece_and_tokenizers''')
        double_backend_with_underscore = find_backend(
            ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
        self.assertEqual(double_backend_with_underscore, '''sentencepiece_and_tensorflow_text''')
        triple_backend = find_backend(
            ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
        self.assertEqual(triple_backend, '''sentencepiece_and_tokenizers_and_vision''')
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''', objects)
        self.assertIn('''tensorflow_text''', objects)
        self.assertIn('''sentencepiece_and_tokenizers''', objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''BertModel''', objects['''torch'''])
        self.assertIn('''TFBertModel''', objects['''tf'''])
        self.assertIn('''FlaxBertModel''', objects['''flax'''])
        self.assertIn('''BertModel''', objects['''torch'''])
        self.assertIn('''TFBertTokenizer''', objects['''tensorflow_text'''])
        self.assertIn('''convert_slow_tokenizer''', objects['''sentencepiece_and_tokenizers'''])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('''CONSTANT''', '''\'torch\'''')
        self.assertEqual(dummy_constant, '''\nCONSTANT = None\n''')
        dummy_function = create_dummy_object('''function''', '''\'torch\'''')
        self.assertEqual(
            dummy_function, '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''')
        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
    _backends = \'torch\'

    def __init__(self, *args, **kwargs):
        requires_backends(self, \'torch\')
'''
        dummy_class = create_dummy_object('''FakeClass''', '''\'torch\'''')
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
        self.assertEqual(dummy_files['''torch'''], expected_dummy_pytorch_file)
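def _demo_find_backend() -> str:
    # Quick illustration (added; not in the original tests): `find_backend`
    # turns a backend-gating line into the key the dummy generator uses. The
    # torch/vision combination is a hypothetical input in the tested format.
    return find_backend(''' if not (is_torch_available() and is_vision_available()):''')  # expected: torch_and_vision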
| 696 | 1 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('''{"config"''') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    '''simple docstring'''
    def __init__(self, username: str) -> None:
        self.url = f'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, '''html.parser''').find_all('''script''')
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self :Optional[Any] ) -> str:
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self :Tuple ) -> str:
return f'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username(self) -> str:
        return self.user_data["username"]
    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]
    @property
    def biography(self) -> str:
        return self.user_data["biography"]
    @property
    def email(self) -> str:
        return self.user_data["business_email"]
    @property
    def website(self) -> str:
        return self.user_data["external_url"]
    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]
    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def _UpperCamelCase ( lowercase__ = "github" ):
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
__SCREAMING_SNAKE_CASE : Union[str, Any] = InstagramUser(lowercase__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowercase__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('github')
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 696 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError('''math domain error''')
    return quad(integrand, 0, inf, args=(num,))[0]
def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
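    # Illustrative values added for clarity: Gamma(n) equals (n - 1)! for
    # positive integers, and Gamma(1/2) is sqrt(pi).
    print(gamma(5))    # ~24.0
    print(gamma(0.5))  # ~1.7724538509055159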
| 696 | 1 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
'''simple docstring'''
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub('''test-config''', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f'''{USER}/test-config''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='''test-config''')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id='''test-config''', push_to_hub=True, use_auth_token=self._token)
            new_config = BertConfig.from_pretrained(f'''{USER}/test-config''')
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub('''valid_org/test-config-org''', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('''valid_org/test-config-org''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='''valid_org/test-config-org''')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='''valid_org/test-config-org''', push_to_hub=True, use_auth_token=self._token)
            new_config = BertConfig.from_pretrained('''valid_org/test-config-org''')
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub('''test-dynamic-config''', use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'''AutoConfig''': '''custom_configuration.CustomConfig'''})
        new_config = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, '''CustomConfig''')
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
'''simple docstring'''
    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + '''foo'''  # str
        c.update_from_string(
            f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''')
        self.assertEqual(n_embd, c.n_embd, '''mismatch for key: n_embd''')
        self.assertEqual(resid_pdrop, c.resid_pdrop, '''mismatch for key: resid_pdrop''')
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, '''mismatch for key: scale_attn_weights''')
        self.assertEqual(summary_type, c.summary_type, '''mismatch for key: summary_type''')
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = PretrainedConfig()
__SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
__SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f''' {', '.join(lowerCAmelCase__ )}.''' )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
with self.assertRaises(lowerCAmelCase__ ):
            # The config is in a subfolder, so the following should not work without specifying the subfolder
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
__SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock()
__SCREAMING_SNAKE_CASE : List[Any] = 500
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError
__SCREAMING_SNAKE_CASE : str = {}
# Download this model to make sure it's in the cache.
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head:
__SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
__SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __magic_name__( self :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json''']
__SCREAMING_SNAKE_CASE : Tuple = 768
configuration.save_pretrained(lowerCAmelCase__ )
shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) )
__SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def __magic_name__( self :List[str] ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
__SCREAMING_SNAKE_CASE : int = '''v4.0.0'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0'''
__SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(old_configuration.hidden_size , 768 )
| 696 |
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(lowercase__ , lowercase__ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
__SCREAMING_SNAKE_CASE : int = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
__SCREAMING_SNAKE_CASE : Union[str, Any] = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
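# Illustrative check (an added example, not part of the original file): for
# principal=25_000, rate_per_annum=0.12 and years_to_repay=3, the monthly rate
# is 0.01 over 36 payments, so the function above returns
# 25_000 * 0.01 * 1.01**36 / (1.01**36 - 1), roughly 830.36 per month.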
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :int = 13 , lowerCAmelCase__ :int = 64 , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :int = 3 , lowerCAmelCase__ :int = 3 , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 128 , lowerCAmelCase__ :int=[16, 32, 64, 128] , lowerCAmelCase__ :int = 7 , lowerCAmelCase__ :int = 4 , lowerCAmelCase__ :int = 37 , lowerCAmelCase__ :str = "gelu" , lowerCAmelCase__ :float = 0.1 , lowerCAmelCase__ :float = 0.1 , lowerCAmelCase__ :int = 10 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :int = 128 , lowerCAmelCase__ :List[int] = [2, 2, 2, 2] , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :int = 2 , ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[str] = parent
__SCREAMING_SNAKE_CASE : str = batch_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
__SCREAMING_SNAKE_CASE : List[str] = num_channels
__SCREAMING_SNAKE_CASE : Optional[int] = is_training
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : int = hidden_size
__SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : List[str] = intermediate_size
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
__SCREAMING_SNAKE_CASE : Any = encoder_stride
__SCREAMING_SNAKE_CASE : List[str] = num_attention_outputs
__SCREAMING_SNAKE_CASE : Any = embed_dim
__SCREAMING_SNAKE_CASE : Tuple = embed_dim + 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = resolution
__SCREAMING_SNAKE_CASE : Optional[int] = depths
__SCREAMING_SNAKE_CASE : str = hidden_sizes
__SCREAMING_SNAKE_CASE : List[str] = dim
__SCREAMING_SNAKE_CASE : int = mlp_expansion_ratio
def __magic_name__( self :Any ) -> List[str]:
__SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : str = self.get_config()
return config, pixel_values, labels
def __magic_name__( self :Optional[int] ) -> Optional[Any]:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def __magic_name__( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[Any] ) -> int:
__SCREAMING_SNAKE_CASE : int = TFEfficientFormerModel(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ , training=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE : Tuple = TFEfficientFormerForImageClassification(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , labels=lowerCAmelCase__ , training=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test greyscale images
__SCREAMING_SNAKE_CASE : Optional[Any] = 1
__SCREAMING_SNAKE_CASE : Optional[int] = TFEfficientFormerForImageClassification(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
__SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE__ : str = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : Any = False
def __magic_name__( self :str ) -> int:
__SCREAMING_SNAKE_CASE : Dict = TFEfficientFormerModelTester(self )
__SCREAMING_SNAKE_CASE : str = ConfigTester(
self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def __magic_name__( self :Optional[int] ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''' )
def __magic_name__( self :int ) -> Optional[Any]:
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''' )
def __magic_name__( self :Dict ) -> Optional[int]:
pass
def __magic_name__( self :Optional[Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Any:
def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict ):
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) , training=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__SCREAMING_SNAKE_CASE : List[str] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
if hasattr(self.model_tester , '''encoder_seq_length''' ):
__SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''' ) and self.model_tester.chunk_length > 1:
__SCREAMING_SNAKE_CASE : List[str] = seq_length * self.model_tester.chunk_length
else:
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__SCREAMING_SNAKE_CASE : Any = outputs.decoder_hidden_states
                self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''seq_length''' , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = getattr(self.model_tester , '''decoder_seq_length''' , lowerCAmelCase__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str=False ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __magic_name__( self :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''' )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> str:
__SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :Optional[Any] ) -> int:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : int = TFEfficientFormerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : List[Any] = True
__SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , '''seq_length''' , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , '''encoder_seq_length''' , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = getattr(self.model_tester , '''key_length''' , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = getattr(self.model_tester , '''chunk_length''' , lowerCAmelCase__ )
if chunk_length is not None and hasattr(self.model_tester , '''num_hashes''' ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
__SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) , training=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__SCREAMING_SNAKE_CASE : str = True
__SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) , training=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def __magic_name__( self :Optional[Any] ) -> Union[str, Any]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__SCREAMING_SNAKE_CASE : Optional[int] = model_class(lowerCAmelCase__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__SCREAMING_SNAKE_CASE : Any = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ )
self.assertTrue(outputs_dict is not None )
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__( self :Any ) -> List[str]:
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''' )
if is_vision_available()
else None
)
@slow
def __magic_name__( self :Optional[int] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''' )
__SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
__SCREAMING_SNAKE_CASE : Tuple = prepare_img()
__SCREAMING_SNAKE_CASE : Any = image_processor(images=lowerCAmelCase__ , return_tensors='''tf''' )
# forward pass
__SCREAMING_SNAKE_CASE : Tuple = model(**lowerCAmelCase__ , training=lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
@slow
def __magic_name__( self :List[Any] ) -> int:
__SCREAMING_SNAKE_CASE : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''' )
__SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
__SCREAMING_SNAKE_CASE : Dict = prepare_img()
__SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCAmelCase__ , return_tensors='''tf''' )
# forward pass
__SCREAMING_SNAKE_CASE : Tuple = model(**lowerCAmelCase__ , training=lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE : List[Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 696 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = prime_factors(lowercase__ )
if is_square_free(lowercase__ ):
return -1 if len(lowercase__ ) % 2 else 1
return 0
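# Illustrative values for the Mobius function above (added examples, not part
# of the original file):
#   n = 10 ->  1  (10 = 2 * 5 is square-free with an even number of prime factors)
#   n = 30 -> -1  (30 = 2 * 3 * 5 is square-free with an odd number of prime factors)
#   n = 12 ->  0  (12 = 2**2 * 3 is not square-free)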
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : List[str] ={
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict =['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any =[
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple =[
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] =['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =[
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : str =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
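# Added note (not part of the original file): `_LazyModule` defers the heavy
# framework imports declared above, so e.g. the TensorFlow classes are only
# imported on first attribute access rather than when this package is imported.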
| 696 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase : int ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __magic_name__( cls :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :List[str] ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :Dict ) -> Optional[int]:
CustomConfig.register_for_auto_class()
__SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int
__SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float
__SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool
__SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = PretrainedConfig()
__SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
__SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f''' {', '.join(lowerCAmelCase__ )}.''' )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
with self.assertRaises(lowerCAmelCase__ ):
            # The config is in a subfolder, so the following should not work without specifying the subfolder
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
__SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock()
__SCREAMING_SNAKE_CASE : List[Any] = 500
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError
__SCREAMING_SNAKE_CASE : str = {}
# Download this model to make sure it's in the cache.
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head:
__SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
__SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __magic_name__( self :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json''']
__SCREAMING_SNAKE_CASE : Tuple = 768
configuration.save_pretrained(lowerCAmelCase__ )
shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) )
__SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def __magic_name__( self :List[str] ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
__SCREAMING_SNAKE_CASE : int = '''v4.0.0'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0'''
__SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(old_configuration.hidden_size , 768 )
| 696 | 1 |
import os
from datetime import datetime as dt
from github import Github
__lowerCAmelCase : List[Any] =[
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = Github(os.environ['''GITHUB_TOKEN'''] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = g.get_repo('''huggingface/diffusers''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
__SCREAMING_SNAKE_CASE : Optional[int] = sorted(issue.get_comments() , key=lambda lowercase__ : i.created_at , reverse=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = comments[0] if len(lowercase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 696 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Any ={
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = F'''{sampling_rate}'''
__SCREAMING_SNAKE_CASE : str = '''1'''
__SCREAMING_SNAKE_CASE : Optional[int] = '''f32le'''
__SCREAMING_SNAKE_CASE : Optional[int] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(lowercase__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
__SCREAMING_SNAKE_CASE : List[Any] = ffmpeg_process.communicate(lowercase__ )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
__SCREAMING_SNAKE_CASE : Union[str, Any] = output_stream[0]
__SCREAMING_SNAKE_CASE : Any = np.frombuffer(lowercase__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
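# Minimal usage sketch for the ffmpeg decoder above (added example with a
# hypothetical file name, not part of the original file):
#   with open("sample.flac", "rb") as f:
#       audio = ffmpeg_read(f.read(), 16_000)  # mono float32 at 16 kHz
# where `ffmpeg_read` stands in for the function defined above.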
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = "f32le" , ):
__SCREAMING_SNAKE_CASE : Tuple = F'''{sampling_rate}'''
__SCREAMING_SNAKE_CASE : Optional[int] = '''1'''
if format_for_conversion == "s16le":
__SCREAMING_SNAKE_CASE : Optional[Any] = 2
elif format_for_conversion == "f32le":
__SCREAMING_SNAKE_CASE : List[str] = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
__SCREAMING_SNAKE_CASE : str = platform.system()
if system == "Linux":
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''alsa'''
__SCREAMING_SNAKE_CASE : Optional[Any] = '''default'''
elif system == "Darwin":
__SCREAMING_SNAKE_CASE : str = '''avfoundation'''
__SCREAMING_SNAKE_CASE : Optional[Any] = ''':0'''
elif system == "Windows":
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''dshow'''
__SCREAMING_SNAKE_CASE : Any = '''default'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
__SCREAMING_SNAKE_CASE : int = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__SCREAMING_SNAKE_CASE : int = _ffmpeg_stream(lowercase__ , lowercase__ )
for item in iterator:
yield item
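# Illustrative sizing for the microphone stream above (added, not part of the
# original file): at sampling_rate=16_000 with chunk_length_s=1.0 and "f32le"
# (4 bytes per sample), each yielded chunk is int(round(16_000 * 1.0)) * 4
# = 64_000 bytes, i.e. one second of mono audio.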
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = "f32le" , ):
if stream_chunk_s is not None:
__SCREAMING_SNAKE_CASE : List[str] = stream_chunk_s
else:
__SCREAMING_SNAKE_CASE : List[str] = chunk_length_s
__SCREAMING_SNAKE_CASE : Optional[int] = ffmpeg_microphone(lowercase__ , lowercase__ , format_for_conversion=lowercase__ )
if format_for_conversion == "s16le":
__SCREAMING_SNAKE_CASE : Optional[Any] = np.intaa
__SCREAMING_SNAKE_CASE : int = 2
elif format_for_conversion == "f32le":
__SCREAMING_SNAKE_CASE : Optional[Any] = np.floataa
__SCREAMING_SNAKE_CASE : List[Any] = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
__SCREAMING_SNAKE_CASE : Any = chunk_length_s / 6
__SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowercase__ , (int, float) ):
__SCREAMING_SNAKE_CASE : int = [stride_length_s, stride_length_s]
__SCREAMING_SNAKE_CASE : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__SCREAMING_SNAKE_CASE : int = datetime.datetime.now()
__SCREAMING_SNAKE_CASE : int = datetime.timedelta(seconds=lowercase__ )
for item in chunk_bytes_iter(lowercase__ , lowercase__ , stride=(stride_left, stride_right) , stream=lowercase__ ):
        # Convert the raw bytes back to numpy samples and express the strides in samples rather than bytes
__SCREAMING_SNAKE_CASE : int = np.frombuffer(item['''raw'''] , dtype=lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
__SCREAMING_SNAKE_CASE : int = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
            # We're running behind real time, so skip this chunk
continue
yield item
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ):
__SCREAMING_SNAKE_CASE : str = b''''''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
for raw in iterator:
acc += raw
if stream and len(lowercase__ ) < chunk_len:
__SCREAMING_SNAKE_CASE : int = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowercase__ ) >= chunk_len:
# We are flushing the accumulator
__SCREAMING_SNAKE_CASE : str = (_stride_left, stride_right)
__SCREAMING_SNAKE_CASE : int = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
__SCREAMING_SNAKE_CASE : Tuple = False
yield item
__SCREAMING_SNAKE_CASE : Any = stride_left
__SCREAMING_SNAKE_CASE : List[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowercase__ ) > stride_left:
__SCREAMING_SNAKE_CASE : int = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
__SCREAMING_SNAKE_CASE : int = False
yield item
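# Worked trace of the chunker above (added example, not part of the original
# file): with chunk_len=6, stride=(2, 2) and stream=False, the input
# b"0123456789AB" is yielded as
#   b"012345" with stride (0, 2)  # the first chunk has no left context
#   b"234567" with stride (2, 2)  # windows advance by 6 - 2 - 2 = 2 bytes
#   b"456789" with stride (2, 2)
#   b"6789AB" with stride (2, 2)
#   b"89AB"   with stride (2, 0)  # trailing remainder shorter than chunk_len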
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = 2**24 # 16Mo
try:
with subprocess.Popen(lowercase__ , stdout=subprocess.PIPE , bufsize=lowercase__ ) as ffmpeg_process:
while True:
__SCREAMING_SNAKE_CASE : Tuple = ffmpeg_process.stdout.read(lowercase__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Optional[int] = d_kv
__SCREAMING_SNAKE_CASE : Tuple = d_ff
__SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers
        # This tells us how many encoder layers apart each sparse encoder layer should be placed.
if self.num_sparse_encoder_layers > 0:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers
else:
__SCREAMING_SNAKE_CASE : Tuple = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us how many decoder layers apart each sparse decoder layer should be placed.
if self.num_sparse_decoder_layers > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers
__SCREAMING_SNAKE_CASE : List[Any] = num_heads
__SCREAMING_SNAKE_CASE : List[Any] = num_experts
__SCREAMING_SNAKE_CASE : Tuple = expert_capacity
__SCREAMING_SNAKE_CASE : List[Any] = router_bias
__SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
__SCREAMING_SNAKE_CASE : List[Any] = router_dtype
__SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens
__SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance
__SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate
__SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : int = initializer_factor
__SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj
__SCREAMING_SNAKE_CASE : Any = use_cache
__SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs
__SCREAMING_SNAKE_CASE : int = router_z_loss_coef
__SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef
__SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
__SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1]
__SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated'''
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new'''
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
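# Illustrative sparsity arithmetic (added, not part of the original file): for
# example, with num_layers=12 and num_sparse_encoder_layers=3 the encoder
# sparse step is 12 // 3 = 4, i.e. every 4th encoder layer becomes a sparse
# (mixture-of-experts) layer.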
| 696 | 1 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = -1
__SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__SCREAMING_SNAKE_CASE : List[Any] = TextStreamer(lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__SCREAMING_SNAKE_CASE : Tuple = cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Optional[int] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__SCREAMING_SNAKE_CASE : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = -1
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.decode(greedy_ids[0] )
__SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__SCREAMING_SNAKE_CASE : Optional[Any] = Thread(target=model.generate , kwargs=lowerCAmelCase__ )
thread.start()
__SCREAMING_SNAKE_CASE : List[str] = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__SCREAMING_SNAKE_CASE : int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = -1
__SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = greedy_ids[:, input_ids.shape[1] :]
__SCREAMING_SNAKE_CASE : Any = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__SCREAMING_SNAKE_CASE : int = TextStreamer(lowerCAmelCase__ , skip_prompt=lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__SCREAMING_SNAKE_CASE : Tuple = cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained('''distilgpt2''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = -1
__SCREAMING_SNAKE_CASE : List[Any] = torch.ones((1, 5) , device=lowerCAmelCase__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__SCREAMING_SNAKE_CASE : Any = TextStreamer(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=1 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__SCREAMING_SNAKE_CASE : List[Any] = cs.out[:-1] # Remove the final "\n"
__SCREAMING_SNAKE_CASE : Any = tokenizer(lowerCAmelCase__ , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __magic_name__( self :Optional[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__SCREAMING_SNAKE_CASE : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = -1
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = TextIteratorStreamer(lowerCAmelCase__ , timeout=0.001 )
__SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__SCREAMING_SNAKE_CASE : Union[str, Any] = Thread(target=model.generate , kwargs=lowerCAmelCase__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
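# --- Illustrative sketch (not part of the test suite above): the consumer
# pattern these streamer tests exercise. A minimal sketch assuming the public
# transformers API; the model name and prompt are placeholders chosen for
# illustration only.
def _stream_generation_sketch(prompt: str = "Hello") -> str:
    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer(prompt, return_tensors="pt")
    # skip_prompt=True yields only the newly generated text, mirroring the
    # skip-prompt test above.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    generation_kwargs = dict(inputs, max_new_tokens=10, do_sample=False, streamer=streamer)
    # generate() blocks, so it runs in a worker thread while this thread
    # consumes decoded text chunks as they become available.
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    return "".join(new_text for new_text in streamer)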
| 696 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
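# --- Illustrative sketch (shown as comments): loading one of the pipelines
# exported above. Assumes the public diffusers API (the un-masked class name is
# TextToVideoSDPipeline); the model id and prompt are placeholders.
#
#   import torch
#   from diffusers import TextToVideoSDPipeline
#
#   pipe = TextToVideoSDPipeline.from_pretrained(
#       "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
#   )
#   frames = pipe("an astronaut riding a horse", num_inference_steps=25).frames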
| 696 | 1 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
# Construct model
if gpta_config_file == "":
__SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaConfig()
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig.from_json_file(lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaModel(lowercase__ )
# Load weights from numpy
load_tf_weights_in_gpta(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
__SCREAMING_SNAKE_CASE : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
__SCREAMING_SNAKE_CASE : Any = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , lowercase__ )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__lowerCAmelCase : List[str] =parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
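# --- Illustrative usage (shell invocation shown as comments; the script name
# and checkpoint path below are placeholders, not real files):
#
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path ./gpt2-pytorch
#
# The dumped folder can then be loaded with the standard transformers API:
#
#   from transformers import GPT2Model
#   model = GPT2Model.from_pretrained("./gpt2-pytorch")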
| 696 |
from datetime import datetime
import requests
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
__SCREAMING_SNAKE_CASE : Tuple = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(lowercase__ ).content
if __name__ == "__main__":
__lowerCAmelCase : int =input('Enter Video/IGTV url: ').strip()
__lowerCAmelCase : Union[str, Any] =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 696 | 1 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = list(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = list(lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for i in range(len(lowercase__ ) ):
        if lista[i] != listb[i]:  # fixed: compare the two strings to each other (the self-comparison was always false)
count += 1
__SCREAMING_SNAKE_CASE : List[Any] = '''_'''
if count > 1:
return False
else:
return "".join(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : int = []
while True:
__SCREAMING_SNAKE_CASE : Tuple = ['''$'''] * len(lowercase__ )
__SCREAMING_SNAKE_CASE : Any = []
for i in range(len(lowercase__ ) ):
for j in range(i + 1 , len(lowercase__ ) ):
__SCREAMING_SNAKE_CASE : str = compare_string(binary[i] , binary[j] )
if k is False:
__SCREAMING_SNAKE_CASE : List[str] = '''*'''
__SCREAMING_SNAKE_CASE : Dict = '''*'''
temp.append('''X''' )
for i in range(len(lowercase__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(lowercase__ ) == 0:
return pi
__SCREAMING_SNAKE_CASE : Any = list(set(lowercase__ ) )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = []
for minterm in minterms:
__SCREAMING_SNAKE_CASE : List[str] = ''''''
for _ in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : int = str(minterm % 2 ) + string
minterm //= 2
temp.append(lowercase__ )
return temp
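# --- Illustrative equivalence check for the conversion above: each minterm is
# rendered as a fixed-width binary string, which Python's format() reproduces
# directly. A sketch, not part of the algorithm.
def _to_binary_strings(no_of_variables: int, minterms: list[int]) -> list[str]:
    return [format(m, f"0{no_of_variables}b") for m in minterms]


assert _to_binary_strings(3, [1, 5]) == ["001", "101"]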
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Any = list(lowercase__ )
__SCREAMING_SNAKE_CASE : int = list(lowercase__ )
__SCREAMING_SNAKE_CASE : Any = 0
for i in range(len(lowercase__ ) ):
        if lista[i] != listb[i]:  # fixed: compare the two strings to each other
count_n += 1
return count_n == count
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : List[str] = [0] * len(lowercase__ )
for i in range(len(chart[0] ) ):
__SCREAMING_SNAKE_CASE : List[Any] = 0
__SCREAMING_SNAKE_CASE : str = -1
for j in range(len(lowercase__ ) ):
if chart[j][i] == 1:
count += 1
__SCREAMING_SNAKE_CASE : int = j
if count == 1:
__SCREAMING_SNAKE_CASE : Any = 1
for i in range(len(lowercase__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(lowercase__ ) ):
__SCREAMING_SNAKE_CASE : str = 0
temp.append(prime_implicants[i] )
while True:
__SCREAMING_SNAKE_CASE : List[str] = 0
__SCREAMING_SNAKE_CASE : Any = -1
__SCREAMING_SNAKE_CASE : Dict = 0
for i in range(len(lowercase__ ) ):
__SCREAMING_SNAKE_CASE : str = chart[i].count(1 )
if count_n > max_n:
__SCREAMING_SNAKE_CASE : List[str] = count_n
__SCREAMING_SNAKE_CASE : Optional[int] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(lowercase__ ) ):
__SCREAMING_SNAKE_CASE : Optional[int] = 0
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = [[0 for x in range(len(lowercase__ ) )] for x in range(len(lowercase__ ) )]
for i in range(len(lowercase__ ) ):
__SCREAMING_SNAKE_CASE : Optional[Any] = prime_implicants[i].count('''_''' )
for j in range(len(lowercase__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = 1
return chart
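# --- Illustrative end-to-end run of the Quine-McCluskey procedure implemented
# above, for f(a, b, c) with minterms {0, 1, 3, 7}. A self-contained sketch
# (the name-masked functions above cannot be called by name); the merging here
# is a simplified variant that is exact for this small example.
from itertools import combinations


def _prime_implicants_sketch(n_vars: int, minterms: list[int]) -> list[str]:
    terms = {format(m, f"0{n_vars}b") for m in minterms}
    primes: set[str] = set()
    while terms:
        merged: set[str] = set()
        used: set[str] = set()
        for a, b in combinations(sorted(terms), 2):
            diff = [i for i in range(n_vars) if a[i] != b[i]]
            if len(diff) == 1:
                merged.add(a[: diff[0]] + "_" + a[diff[0] + 1 :])
                used.update({a, b})
        primes |= terms - used  # terms that merged with nothing are prime
        terms = merged
    return sorted(primes)


# 000/001 -> 00_, 001/011 -> 0_1, 011/111 -> _11; nothing merges further, so
# the prime implicants are a'b', a'c and bc. The essential ones (00_ covers
# minterm 0 alone, _11 covers minterm 7 alone) already cover all four minterms.
assert _prime_implicants_sketch(3, [0, 1, 3, 7]) == ["00_", "0_1", "_11"]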
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : int = int(input('''Enter the no. of variables\n''' ) )
__SCREAMING_SNAKE_CASE : List[Any] = [
        int(lowercase__ )  # minterms are integers; float inputs would corrupt the binary conversion
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
__SCREAMING_SNAKE_CASE : Optional[int] = decimal_to_binary(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = check(lowercase__ )
print('''Prime Implicants are:''' )
print(lowercase__ )
__SCREAMING_SNAKE_CASE : Any = prime_implicant_chart(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = selection(lowercase__ , lowercase__ )
print('''Essential Prime Implicants are:''' )
print(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 696 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__( self :int ) -> Optional[int]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' )
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = '''french fries'''
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
__SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5
__SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()]
        print(''','''.join([str(x) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae''']
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
__SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
__SCREAMING_SNAKE_CASE : Dict = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Dict ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = self.get_inputs()
__SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : str = self.get_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = 0
def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None:
__SCREAMING_SNAKE_CASE : Dict = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __magic_name__( self :List[str] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __magic_name__( self :int ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) )
__SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix'''
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = output.images[0]
__SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
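# --- Illustrative inference sketch for the pipeline exercised by the tests
# above. Assumes the public diffusers API (the un-masked class name is
# StableDiffusionInstructPix2PixPipeline); the prompt and output path are
# placeholders. Wrapped in a function so nothing heavy runs on import.
def _instruct_pix2pix_sketch() -> None:
    import torch
    from diffusers import StableDiffusionInstructPix2PixPipeline
    from diffusers.utils import load_image

    pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
        "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
    ).to("cuda")
    image = load_image(
        "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
    )
    edited = pipe(
        "turn him into a cyborg",
        image=image,
        num_inference_steps=10,
        image_guidance_scale=1.0,
    ).images[0]
    edited.save("cyborg.png")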
| 696 | 1 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = analyze_text(lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(''' ''' + ascii_lowercase )
    # total count of single characters, used to normalise the counts into probabilities
__SCREAMING_SNAKE_CASE : Optional[Any] = sum(single_char_strings.values() )
# one length string
__SCREAMING_SNAKE_CASE : Optional[int] = 0
    # for each alphabet character present in the text, accumulate its entropy contribution
for ch in my_alphas:
if ch in single_char_strings:
__SCREAMING_SNAKE_CASE : Optional[Any] = single_char_strings[ch]
__SCREAMING_SNAKE_CASE : str = my_str / all_sum
my_fir_sum += prob * math.loga(lowercase__ ) # entropy formula.
# print entropy
print(F'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
__SCREAMING_SNAKE_CASE : Dict = sum(two_char_strings.values() )
__SCREAMING_SNAKE_CASE : Optional[int] = 0
    # for each two-character sequence present in the text, accumulate its entropy contribution
for cha in my_alphas:
        for chb in my_alphas:  # fixed: the inner loop needs a variable distinct from the outer one
            __SCREAMING_SNAKE_CASE : Tuple = cha + chb
if sequence in two_char_strings:
__SCREAMING_SNAKE_CASE : str = two_char_strings[sequence]
__SCREAMING_SNAKE_CASE : Optional[int] = int(lowercase__ ) / all_sum
my_sec_sum += prob * math.loga(lowercase__ )
# print second entropy
print(F'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = Counter() # type: ignore
__SCREAMING_SNAKE_CASE : Dict = Counter() # type: ignore
    single_char_strings[text[-1]] += 1  # the loop below stops one character short, so count the last one here
    # seed the bigram counts with a leading space before the first character
    two_char_strings[" " + text[0]] += 1
for i in range(0 , len(lowercase__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def _UpperCamelCase ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 696 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : torch.FloatTensor
SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None
def _UpperCamelCase ( lowercase__ , lowercase__=0.999 , lowercase__="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = i / num_diffusion_timesteps
__SCREAMING_SNAKE_CASE : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) )
return torch.tensor(lowercase__ , dtype=torch.floataa )
class _lowercase ( A__ , A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 1
@register_to_config
def __init__( self :Dict , lowerCAmelCase__ :int = 1_000 , lowerCAmelCase__ :float = 0.0001 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :str = "linear" , lowerCAmelCase__ :Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :str = "epsilon" , lowerCAmelCase__ :float = 1.0 , **lowerCAmelCase__ :int , ) -> Union[str, Any]:
if kwargs.get('''set_alpha_to_one''' , lowerCAmelCase__ ) is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
__SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "linear":
__SCREAMING_SNAKE_CASE : str = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__SCREAMING_SNAKE_CASE : List[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__SCREAMING_SNAKE_CASE : Optional[Any] = betas_for_alpha_bar(lowerCAmelCase__ )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = 1.0 - self.betas
__SCREAMING_SNAKE_CASE : int = torch.cumprod(self.alphas , dim=0 )
        # At every step of inverted DDIM we look up the *next* alphas_cumprod.
        # For the final step there is no next alphas_cumprod and the index would be out of bounds.
        # `set_alpha_to_zero` decides whether this parameter is simply set to zero
        # (in which case self.step() just outputs the predicted noise)
        # or whether the final alpha of the "non-previous" timestep is used instead.
__SCREAMING_SNAKE_CASE : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
__SCREAMING_SNAKE_CASE : Any = 1.0
# setable values
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.arange(0 , lowerCAmelCase__ ).copy().astype(np.intaa ) )
def __magic_name__( self :List[str] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :Optional[int] = None ) -> torch.FloatTensor:
return sample
def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, torch.device] = None ) -> List[str]:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
f''' maximal {self.config.num_train_timesteps} timesteps.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_inference_steps
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__SCREAMING_SNAKE_CASE : Optional[int] = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round().copy().astype(np.intaa )
__SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.timesteps += self.config.steps_offset
def __magic_name__( self :Tuple , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
__SCREAMING_SNAKE_CASE : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
__SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod[timestep]
__SCREAMING_SNAKE_CASE : str = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
__SCREAMING_SNAKE_CASE : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
__SCREAMING_SNAKE_CASE : List[Any] = model_output
elif self.config.prediction_type == "sample":
__SCREAMING_SNAKE_CASE : List[str] = model_output
__SCREAMING_SNAKE_CASE : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
__SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
__SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
__SCREAMING_SNAKE_CASE : Dict = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
def __len__( self :Optional[int] ) -> List[Any]:
return self.config.num_train_timesteps
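# --- Illustrative numeric check of the epsilon-prediction branch of step()
# above (formula (12) of the DDIM paper), using toy scalars rather than a real
# diffusion run: noising with a known epsilon and then inverting the update
# recovers x0 exactly. A sketch, not part of the scheduler.
_alpha_prod_t, _alpha_prod_t_prev = torch.tensor(0.9), torch.tensor(0.8)
_x0, _eps = torch.tensor(2.0), torch.tensor(0.5)
_sample = _alpha_prod_t**0.5 * _x0 + (1 - _alpha_prod_t) ** 0.5 * _eps  # forward noising
_pred_x0 = (_sample - (1 - _alpha_prod_t) ** 0.5 * _eps) / _alpha_prod_t**0.5
assert torch.allclose(_pred_x0, _x0)  # the update recovers x0
_prev_sample = _alpha_prod_t_prev**0.5 * _pred_x0 + (1 - _alpha_prod_t_prev) ** 0.5 * _eps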
| 696 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
SCREAMING_SNAKE_CASE__ : str = "text"
SCREAMING_SNAKE_CASE__ : str = "summary"
@property
def __magic_name__( self :Union[str, Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
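# --- Illustrative sketch of how the template above is used. The class name is
# masked in this file, so the minimal stand-in below reproduces its behaviour;
# the column names are placeholders.
@dataclass(frozen=True)
class _SummarizationSketch:
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}


assert _SummarizationSketch("article", "highlights").column_mapping == {
    "article": "text",
    "highlights": "summary",
}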
| 696 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class _lowercase :
'''simple docstring'''
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict ) -> str:
return None
class _lowercase :
'''simple docstring'''
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]:
return None
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def __magic_name__( self :Any ) -> str:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCAmelCase__ , '''tf''' , 12 , **lowerCAmelCase__ )
@require_torch
@slow
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCAmelCase__ , '''pt''' , 12 , **lowerCAmelCase__ )
@require_torch
@slow
def __magic_name__( self :Tuple ) -> Optional[int]:
from transformers import BertModel
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(lowerCAmelCase__ ) )
vocab_file.flush()
__SCREAMING_SNAKE_CASE : int = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
__SCREAMING_SNAKE_CASE : int = BertModel(BertConfig(vocab_size=len(lowerCAmelCase__ ) ) )
model.save_pretrained(lowerCAmelCase__ )
self._test_export(lowerCAmelCase__ , '''pt''' , 12 , lowerCAmelCase__ )
@require_tf
@slow
def __magic_name__( self :Dict ) -> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__SCREAMING_SNAKE_CASE : Tuple = self._test_export(lowerCAmelCase__ , '''tf''' , 12 , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = quantize(Path(lowerCAmelCase__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCAmelCase__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def __magic_name__( self :Union[str, Any] ) -> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__SCREAMING_SNAKE_CASE : Any = self._test_export(lowerCAmelCase__ , '''pt''' , 12 , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = quantize(lowerCAmelCase__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCAmelCase__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str]=None , **lowerCAmelCase__ :int ) -> Dict:
try:
# Compute path
with TemporaryDirectory() as tempdir:
__SCREAMING_SNAKE_CASE : Optional[int] = Path(lowerCAmelCase__ ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
return path
except Exception as e:
self.fail(lowerCAmelCase__ )
@require_torch
@require_tokenizers
@slow
def __magic_name__( self :Optional[Any] ) -> str:
from transformers import BertModel
__SCREAMING_SNAKE_CASE : Any = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(lowerCAmelCase__ , lowerCAmelCase__ , '''pt''' )
@require_tf
@require_tokenizers
@slow
def __magic_name__( self :Union[str, Any] ) -> Dict:
from transformers import TFBertModel
__SCREAMING_SNAKE_CASE : List[Any] = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
__SCREAMING_SNAKE_CASE : Tuple = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(lowerCAmelCase__ , lowerCAmelCase__ , '''tf''' )
def __magic_name__( self :Any , lowerCAmelCase__ :int , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE : Tuple = FeatureExtractionPipeline(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = infer_shapes(lowerCAmelCase__ , lowerCAmelCase__ )
# Assert all variables are present
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowerCAmelCase__ )
self.assertSequenceEqual(variable_names[3:] , lowerCAmelCase__ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def __magic_name__( self :Dict ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
__SCREAMING_SNAKE_CASE : List[str] = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = ensure_valid_input(FuncContiguousArgs() , lowerCAmelCase__ , lowerCAmelCase__ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCAmelCase__ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCAmelCase__ ) , set(lowerCAmelCase__ ) )
        # Parameters should be reordered according to their respective places in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCAmelCase__ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
        # Generated args are interleaved with other args (for instance the "past" parameter in GPT-2)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = ensure_valid_input(FuncNonContiguousArgs() , lowerCAmelCase__ , lowerCAmelCase__ )
        # Should have exactly one arg (everything before the argument that was not provided, "some_other_args")
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
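# For reference, a minimal export-then-quantize flow with the helpers
# exercised above (a sketch: the model name and output path are illustrative
# assumptions, not values fixed by these tests):
#
#   from pathlib import Path
#   from transformers.convert_graph_to_onnx import convert, quantize
#
#   onnx_path = Path("/tmp/onnx/model.onnx")
#   convert("pt", "bert-base-cased", onnx_path, 12)  # framework, model, output, opset
#   quantized_path = quantize(onnx_path)  # writes model-quantized.onnx alongside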
| 696 |
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 | 1 |
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 |
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    # dp[i][j] is True when the first i chars of `a` can produce the first j chars of `b`
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
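    # Illustrative checks for the classic "abbreviation" problem solved
    # above: can `a` be turned into `b` by upper-casing some of its letters
    # and deleting the remaining lowercase ones?
    print(abbr("daBcd", "ABC"))  # True
    print(abbr("dBcd", "ABC"))  # False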
| 696 | 1 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load Python 2 dataset pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__lowerCAmelCase : Tuple =data_utils.TransfoXLTokenizer
__lowerCAmelCase : Optional[int] =data_utils.TransfoXLCorpus
__lowerCAmelCase : str =data_utils
__lowerCAmelCase : Optional[int] =data_utils
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(lowercase__ , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : List[str] = pickle.load(lowercase__ , encoding='''latin1''' )
        # Save vocabulary and dataset cache as dictionaries (more robust than pickles in the long term)
__SCREAMING_SNAKE_CASE : Optional[int] = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
__SCREAMING_SNAKE_CASE : str = corpus.vocab.__dict__
torch.save(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''' , lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(lowercase__ , lowercase__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
__SCREAMING_SNAKE_CASE : Tuple = os.path.abspath(lowercase__ )
__SCREAMING_SNAKE_CASE : int = os.path.abspath(lowercase__ )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
__SCREAMING_SNAKE_CASE : Any = TransfoXLConfig()
else:
__SCREAMING_SNAKE_CASE : Tuple = TransfoXLConfig.from_json_file(lowercase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = TransfoXLLMHeadModel(lowercase__ )
__SCREAMING_SNAKE_CASE : str = load_tf_weights_in_transfo_xl(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
__SCREAMING_SNAKE_CASE : Any = os.path.join(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(lowercase__ , lowercase__ )
print(F'''Save PyTorch model to {os.path.abspath(lowercase__ )}''' )
torch.save(model.state_dict() , lowercase__ )
print(F'''Save configuration file to {os.path.abspath(lowercase__ )}''' )
with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
            'An optional config json file corresponding to the pre-trained Transformer-XL model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
__lowerCAmelCase : str =parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
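    # Example invocation (script and path names are illustrative placeholders):
    #   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./tf_ckpt/model.ckpt \
    #       --transfo_xl_config_file ./tf_ckpt/config.json \
    #       --pytorch_dump_folder_path ./pytorch_dump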
| 696 |
from scipy.stats import pearsonr
import datasets
__lowerCAmelCase : str ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__lowerCAmelCase : Tuple ='\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
__lowerCAmelCase : Optional[int] ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Optional[int] ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=False ) -> int:
if return_pvalue:
__SCREAMING_SNAKE_CASE : int = pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )[0] )}
| 696 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Dict=13 , lowerCAmelCase__ :Optional[Any]=7 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[str]=99 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple=5 , lowerCAmelCase__ :Optional[int]=4 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :List[Any]="gelu" , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Any=512 , lowerCAmelCase__ :List[str]=16 , lowerCAmelCase__ :Optional[Any]=2 , lowerCAmelCase__ :int=0.02 , lowerCAmelCase__ :Tuple=3 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :Union[str, Any]=4 , lowerCAmelCase__ :Tuple=1 , ) -> Any:
__SCREAMING_SNAKE_CASE : List[str] = parent
__SCREAMING_SNAKE_CASE : Tuple = batch_size
__SCREAMING_SNAKE_CASE : str = seq_length
__SCREAMING_SNAKE_CASE : Optional[int] = is_training
__SCREAMING_SNAKE_CASE : int = use_input_mask
__SCREAMING_SNAKE_CASE : Dict = use_token_type_ids
__SCREAMING_SNAKE_CASE : Any = use_labels
__SCREAMING_SNAKE_CASE : List[Any] = vocab_size
__SCREAMING_SNAKE_CASE : int = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : str = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
__SCREAMING_SNAKE_CASE : Dict = type_vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
__SCREAMING_SNAKE_CASE : Dict = num_labels
__SCREAMING_SNAKE_CASE : List[Any] = num_choices
__SCREAMING_SNAKE_CASE : Union[str, Any] = scope
__SCREAMING_SNAKE_CASE : int = q_groups
__SCREAMING_SNAKE_CASE : Any = k_groups
__SCREAMING_SNAKE_CASE : Tuple = v_groups
__SCREAMING_SNAKE_CASE : List[Any] = post_attention_groups
__SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_groups
__SCREAMING_SNAKE_CASE : Any = output_groups
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Any = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : int = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__( self :List[Any] ) -> Any:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = SqueezeBertModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> int:
__SCREAMING_SNAKE_CASE : Optional[int] = SqueezeBertForMaskedLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : Tuple = SqueezeBertForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Tuple = self.num_labels
__SCREAMING_SNAKE_CASE : Tuple = SqueezeBertForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] ) -> int:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_labels
__SCREAMING_SNAKE_CASE : Dict = SqueezeBertForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] ) -> Any:
__SCREAMING_SNAKE_CASE : str = self.num_choices
__SCREAMING_SNAKE_CASE : str = SqueezeBertForMultipleChoice(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : int = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__( self :Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) : Tuple = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
SCREAMING_SNAKE_CASE__ : Dict = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def __magic_name__( self :Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[int] = SqueezeBertModelTester(self )
__SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , dim=37 )
def __magic_name__( self :Optional[int] ) -> int:
self.config_tester.run_common_tests()
def __magic_name__( self :int ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCAmelCase__ )
def __magic_name__( self :Optional[int] ) -> List[str]:
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCAmelCase__ )
def __magic_name__( self :int ) -> Dict:
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCAmelCase__ )
def __magic_name__( self :str ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCAmelCase__ )
@slow
def __magic_name__( self :Any ) -> str:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Any = SqueezeBertModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__( self :int ) -> Any:
__SCREAMING_SNAKE_CASE : Tuple = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
__SCREAMING_SNAKE_CASE : Any = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] )
__SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 3) )
self.assertEqual(output.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-4 ) )
| 696 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : int ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : int ={
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
__lowerCAmelCase : Optional[int] ={'mobilebert-uncased': 5_1_2}
__lowerCAmelCase : Union[str, Any] ={}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[Any] = MobileBertTokenizer
def __init__( self :Tuple , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]="[UNK]" , lowerCAmelCase__ :List[Any]="[SEP]" , lowerCAmelCase__ :List[Any]="[PAD]" , lowerCAmelCase__ :List[Any]="[CLS]" , lowerCAmelCase__ :Any="[MASK]" , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :List[str] , ) -> Optional[Any]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars
):
__SCREAMING_SNAKE_CASE : int = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case
__SCREAMING_SNAKE_CASE : str = strip_accents
__SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars
__SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = do_lower_case
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any]=None ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
__SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
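# Typical use of the fast tokenizer defined above (sketch):
#   tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   tok("hello world")  # -> input_ids, token_type_ids, attention_mask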
| 696 | 1 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    # 1 / R_total = 1/R_1 + 1/R_2 + ... + 1/R_n
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    # R_total = R_1 + R_2 + ... + R_n
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
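    # Worked example with 2 ohm and 4 ohm resistors:
    print(resistor_parallel([2, 4]))  # 1 / (1/2 + 1/4) = 1.333...
    print(resistor_series([2, 4]))  # 2 + 4 = 6.0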
| 696 |
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    # The two diagonal products start at 0 so the max() below is always defined
    diag_product = 0
    anti_diag_product = 0
    # Check vertically, horizontally and diagonally at the same time (only
    # works for an n x n grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            lat_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal (/) product
            if i > 2:
                anti_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(vert_product, lat_product, diag_product, anti_diag_product)
            if max_product > largest:
                largest = max_product
    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
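    # Self-contained check: the / diagonal of nines dominates this 4x4 grid,
    # giving 9 * 9 * 9 * 9 = 6561.
    small_grid = [[1, 1, 1, 9], [1, 1, 9, 1], [1, 9, 1, 1], [9, 1, 1, 1]]
    assert largest_product(small_grid) == 9**4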
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : List[str] ={
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] =['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] =[
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] =[
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) )
class _lowercase :
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier
__SCREAMING_SNAKE_CASE : Dict = min_depth
__SCREAMING_SNAKE_CASE : List[str] = tf_padding
__SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier )
__SCREAMING_SNAKE_CASE : List[str] = output_stride
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = scope
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __magic_name__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __magic_name__( self :List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __magic_name__( self :Any ) -> Dict:
pass
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : Optional[int] = 26
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :List[str] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor
__SCREAMING_SNAKE_CASE : int = prepare_img()
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 696 | 1 |
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
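    # The n-th hexagonal number is n * (2n - 1); since the generator starts
    # at n = 0, hexagonal_numbers(5) yields [0, 1, 6, 15, 28].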
| 696 |
import os
from datetime import datetime as dt
from github import Github
__lowerCAmelCase : List[Any] =[
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = Github(os.environ['''GITHUB_TOKEN'''] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = g.get_repo('''huggingface/diffusers''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
__SCREAMING_SNAKE_CASE : Optional[int] = sorted(issue.get_comments() , key=lambda lowercase__ : i.created_at , reverse=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = comments[0] if len(lowercase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 696 | 1 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # Rightmost occurrence of char in the pattern, or -1 if absent
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # Rightmost mismatching text position for an alignment at current_pos,
        # or -1 if the pattern matches there
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
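    # For text "ABAABA" and pattern "AB" the expected output is:
    #   Pattern found in following positions:
    #   [0, 3]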
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = '''canine'''
def __init__( self :Any , lowerCAmelCase__ :List[Any]=768 , lowerCAmelCase__ :Any=12 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=3_072 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :int=16_384 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :List[Any]=0.02 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :List[Any]=0xe000 , lowerCAmelCase__ :List[str]=0xe001 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :Union[str, Any]=8 , lowerCAmelCase__ :Optional[int]=16_384 , lowerCAmelCase__ :Any=128 , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
__SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = initializer_range
__SCREAMING_SNAKE_CASE : int = type_vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
# Character config:
__SCREAMING_SNAKE_CASE : Tuple = downsampling_rate
__SCREAMING_SNAKE_CASE : Optional[Any] = upsampling_kernel_size
__SCREAMING_SNAKE_CASE : Any = num_hash_functions
__SCREAMING_SNAKE_CASE : Optional[int] = num_hash_buckets
__SCREAMING_SNAKE_CASE : List[str] = local_transformer_stride
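# Minimal usage sketch (the class names follow the transformers API):
#   from transformers import CanineConfig, CanineModel
#   config = CanineConfig()  # defaults correspond to google/canine-s
#   model = CanineModel(config)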
| 696 | 1 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :int , lowerCAmelCase__ :List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Tuple = 3
__SCREAMING_SNAKE_CASE : int = 250
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((batch_size, length) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length
return input_ids, scores
def __magic_name__( self :List[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self._get_tensors(5 )
__SCREAMING_SNAKE_CASE : Optional[int] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :str ) -> str:
__SCREAMING_SNAKE_CASE : Optional[Any] = MaxLengthCriteria(max_length=10 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :str ) -> str:
__SCREAMING_SNAKE_CASE : Dict = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : List[str] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def __magic_name__( self :Optional[int] ) -> Any:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = self._get_tensors(5 )
__SCREAMING_SNAKE_CASE : List[str] = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Tuple = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :Any ) -> str:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase__ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
__SCREAMING_SNAKE_CASE : str = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
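# In practice these criteria are handed to generation, e.g. (sketch):
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   model.generate(input_ids, stopping_criteria=criteria)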
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = '''transfo-xl'''
SCREAMING_SNAKE_CASE__ : List[str] = ['''mems''']
SCREAMING_SNAKE_CASE__ : List[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :str , lowerCAmelCase__ :Optional[int]=267_735 , lowerCAmelCase__ :Optional[int]=[20_000, 40_000, 200_000] , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :List[str]=1_024 , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :Tuple=64 , lowerCAmelCase__ :Union[str, Any]=4_096 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Dict=18 , lowerCAmelCase__ :Union[str, Any]=1_600 , lowerCAmelCase__ :Union[str, Any]=1_000 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=0 , lowerCAmelCase__ :Union[str, Any]=-1 , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :str="normal" , lowerCAmelCase__ :Tuple=0.01 , lowerCAmelCase__ :Union[str, Any]=0.01 , lowerCAmelCase__ :str=0.02 , lowerCAmelCase__ :Optional[Any]=1E-5 , lowerCAmelCase__ :Union[str, Any]=0 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
__SCREAMING_SNAKE_CASE : str = vocab_size
__SCREAMING_SNAKE_CASE : Tuple = []
self.cutoffs.extend(lowerCAmelCase__ )
if proj_share_all_but_first:
__SCREAMING_SNAKE_CASE : List[str] = [False] + [True] * len(self.cutoffs )
else:
__SCREAMING_SNAKE_CASE : Tuple = [False] + [False] * len(self.cutoffs )
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_embed
__SCREAMING_SNAKE_CASE : Tuple = d_head
__SCREAMING_SNAKE_CASE : Dict = d_inner
__SCREAMING_SNAKE_CASE : Optional[Any] = div_val
__SCREAMING_SNAKE_CASE : Optional[Any] = pre_lnorm
__SCREAMING_SNAKE_CASE : List[str] = n_layer
__SCREAMING_SNAKE_CASE : int = n_head
__SCREAMING_SNAKE_CASE : str = mem_len
__SCREAMING_SNAKE_CASE : Union[str, Any] = same_length
__SCREAMING_SNAKE_CASE : str = attn_type
__SCREAMING_SNAKE_CASE : Dict = clamp_len
__SCREAMING_SNAKE_CASE : Tuple = sample_softmax
__SCREAMING_SNAKE_CASE : Optional[int] = adaptive
__SCREAMING_SNAKE_CASE : int = dropout
__SCREAMING_SNAKE_CASE : Optional[Any] = dropatt
__SCREAMING_SNAKE_CASE : int = untie_r
__SCREAMING_SNAKE_CASE : Optional[int] = init
__SCREAMING_SNAKE_CASE : List[str] = init_range
__SCREAMING_SNAKE_CASE : Any = proj_init_std
__SCREAMING_SNAKE_CASE : List[str] = init_std
__SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon
super().__init__(eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __magic_name__( self :str ) -> int:
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def __magic_name__( self :Tuple , lowerCAmelCase__ :int ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
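# Minimal usage sketch (a deliberately tiny config rather than the wt103
# defaults; the sizes are illustrative):
#   from transformers import TransfoXLConfig, TransfoXLModel
#   model = TransfoXLModel(TransfoXLConfig(n_layer=2, d_model=128, d_embed=128))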
| 696 | 1 |
from typing import Any
def viterbi( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''''''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''''''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
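# Hedged usage sketch (my addition): the classic two-state health/fever HMM.
# All names come from the function above; the probabilities are illustrative.
#     observations = ["normal", "cold", "dizzy"]
#     states = ["Healthy", "Fever"]
#     start_p = {"Healthy": 0.6, "Fever": 0.4}
#     trans_p = {
#         "Healthy": {"Healthy": 0.7, "Fever": 0.3},
#         "Fever": {"Healthy": 0.4, "Fever": 0.6},
#     }
#     emit_p = {
#         "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#         "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     }
#     viterbi(observations, states, start_p, trans_p, emit_p)
#     # -> ['Healthy', 'Healthy', 'Fever']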
def _validation( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('''There\'s an empty parameter''' )
def _validate_lists( observations_space , states_space ):
    _validate_list(observations_space , '''observations_space''' )
    _validate_list(states_space , '''states_space''' )
def _validate_list( _object , var_name ):
    if not isinstance(_object , list ):
        msg = f'''{var_name} must be a list'''
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = f'''{var_name} must be a list of strings'''
                raise ValueError(msg )
def _validate_dicts( initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validate_dict(initial_probabilities , '''initial_probabilities''' , float )
    _validate_nested_dict(transition_probabilities , '''transition_probabilities''' )
    _validate_nested_dict(emission_probabilities , '''emission_probabilities''' )
def _validate_nested_dict( _object , var_name ):
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict( _object , var_name , value_type , nested = False ):
    if not isinstance(_object , dict ):
        msg = f'''{var_name} must be a dict'''
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = f'''{var_name} all keys must be strings'''
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = '''nested dictionary ''' if nested else ''''''
        msg = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Any ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''megatron-bert'''
    def __init__( self , vocab_size=29_056 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ) -> Optional[Any]:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 696 | 1 |
import math
def jump_search( arr , x ):
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
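# Quick sanity checks (my addition): jump search assumes a sorted array and runs
# in O(sqrt(n)) by probing every floor(sqrt(n))-th element, then scanning back.
#     jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89], 55)   # -> 10
#     jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89], 4)    # -> -1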
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : Optional[int] =[int(item) for item in user_input.split(',')]
__lowerCAmelCase : List[Any] =int(input('Enter the number to be searched:\n'))
__lowerCAmelCase : Optional[Any] =jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(f"""Number {x} is at index {res}""")
| 696 |
import os
import sys
import unittest
__lowerCAmelCase : List[Any] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowerCAmelCase : Optional[Any] =os.path.join(git_repo_path, 'src', 'transformers')
__lowerCAmelCase : Optional[Any] ='\n{0} = None\n'
__lowerCAmelCase : Tuple ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
__lowerCAmelCase : Dict ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    def __magic_name__( self :Tuple ) -> Union[str, Any]:
        no_backend = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
        self.assertIsNone(no_backend )
        simple_backend = find_backend(''' if not is_tokenizers_available():''' )
        self.assertEqual(simple_backend , '''tokenizers''' )
        backend_with_underscore = find_backend(''' if not is_tensorflow_text_available():''' )
        self.assertEqual(backend_with_underscore , '''tensorflow_text''' )
        double_backend = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
        self.assertEqual(double_backend , '''sentencepiece_and_tokenizers''' )
        double_backend_with_underscore = find_backend(
            ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
        self.assertEqual(double_backend_with_underscore , '''sentencepiece_and_tensorflow_text''' )
        triple_backend = find_backend(
            ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
        self.assertEqual(triple_backend , '''sentencepiece_and_tokenizers_and_vision''' )
    def __magic_name__( self :List[str] ) -> Optional[int]:
        objects = read_init()
        # We don't assert on the exact list of keys to allow for the smooth growth of backend-specific objects
        self.assertIn('''torch''' , objects )
        self.assertIn('''tensorflow_text''' , objects )
        self.assertIn('''sentencepiece_and_tokenizers''' , objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''BertModel''' , objects['''torch'''] )
        self.assertIn('''TFBertModel''' , objects['''tf'''] )
        self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
        self.assertIn('''BertModel''' , objects['''torch'''] )
        self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
        self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
    def __magic_name__( self :Optional[Any] ) -> List[Any]:
        dummy_constant = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
        self.assertEqual(dummy_constant , '''\nCONSTANT = None\n''' )
        dummy_function = create_dummy_object('''function''' , '''\'torch\'''' )
        self.assertEqual(
            dummy_function , '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''' )
        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
    _backends = \'torch\'

    def __init__(self, *args, **kwargs):
        requires_backends(self, \'torch\')
'''
        dummy_class = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
        self.assertEqual(dummy_class , expected_dummy_class )
    def __magic_name__( self :Optional[Any] ) -> Any:
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
        self.assertEqual(dummy_files['''torch'''] , expected_dummy_pytorch_file )
| 696 | 1 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared( vector ):
    return np.dot(vector , vector )
class _lowercase :
'''simple docstring'''
    def __init__( self , *,
        regularization: float = np.inf , kernel: str = "linear" , gamma: float = 0.0 , ) -> None:
        self.regularization = regularization
        self.gamma = gamma
if kernel == "linear":
            self.kernel = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1 / (n_features * X.var()) (wiki)
            # previously it was 1 / n_features
        else:
            msg = f'''Unknown kernel: {kernel}'''
            raise ValueError(msg )
    def __linear( self , vectora: ndarray , vectorb: ndarray ) -> float:
        return np.dot(vectora , vectorb )
    def __rbf( self , vectora: ndarray , vectorb: ndarray ) -> float:
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb )) )
    def fit( self , observations: list[ndarray] , classes: ndarray ) -> None:
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n,) = np.shape(classes )
        def to_minimize(candidate: ndarray ) -> float:
            s = 0
            (n,) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j] )
                    )
            return 1 / 2 * s - sum(candidate )
        ly_constraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_constraint] ).x
        self.optimum = l_star
# calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j] )
        self.offset = s / n
    def predict( self , observation: ndarray ) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
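# Hedged usage sketch (my addition): fit the linear-kernel classifier on three
# trivially separable points and classify a new observation. Class and method
# names are taken from the code above; the data is illustrative only.
#     xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]), np.asarray([0.0, -1.0])]
#     ys = np.asarray([1, 1, -1])
#     svc = _lowercase(kernel="linear")
#     svc.fit(xs, ys)
#     svc.predict(np.asarray([0.0, 3.0]))   # -> 1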
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
import math
from numpy import inf
from scipy.integrate import quad
def _UpperCamelCase ( num ):
    if num <= 0:
        raise ValueError('''math domain error''' )
    return quad(integrand , 0 , inf , args=(num,) )[0]
def integrand ( x , z ):
    return math.pow(x , z - 1 ) * math.exp(-x )
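# Sanity checks (my addition): for positive integers Gamma(n) == (n - 1)!,
# so the quadrature result should be close to 24 for n == 5.
#     _UpperCamelCase(5)     # -> ~24.0
#     _UpperCamelCase(0.5)   # -> ~1.77245, i.e. sqrt(pi)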
if __name__ == "__main__":
from doctest import testmod
testmod()
| 696 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = (PNDMScheduler,)
SCREAMING_SNAKE_CASE__ : Optional[Any] = (('''num_inference_steps''', 50),)
    def get_scheduler_config( self , **kwargs ) -> List[str]:
        config = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
        config.update(**kwargs )
return config
    def check_over_configs( self , time_step=0 , **config ) -> Optional[Any]:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (the `ets` attribute name follows the scheduler's internals)
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __magic_name__( self :Union[str, Any] ) -> str:
pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ) -> Tuple:
        kwargs = dict(self.forward_default_kwargs )
        kwargs.update(forward_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ) -> Optional[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.prk_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_prk(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_plms(residual , t , sample ).prev_sample
        return sample
    def __magic_name__( self :Dict ) -> Optional[Any]:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , '''set_timesteps''' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , '''set_timesteps''' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            output_a = scheduler.step_prk(residual , 0 , sample , **kwargs ).prev_sample
            output_b = scheduler.step_prk(residual , 1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )
            output_a = scheduler.step_plms(residual , 0 , sample , **kwargs ).prev_sample
            output_b = scheduler.step_plms(residual , 1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )
    def __magic_name__( self :str ) -> List[str]:
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def __magic_name__( self :int ) -> str:
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
    def __magic_name__( self :Any ) -> List[str]:
        for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def __magic_name__( self :int ) -> Dict:
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def __magic_name__( self :List[str] ) -> List[str]:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def __magic_name__( self :Optional[int] ) -> str:
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t )
    def __magic_name__( self :int ) -> Optional[int]:
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps )
def __magic_name__( self :Dict ) -> Optional[Any]:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                sample = scheduler.step_prk(residual , t , sample ).prev_sample
    def __magic_name__( self :Dict ) -> Tuple:
        with self.assertRaises(ValueError ):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
    def __magic_name__( self :Tuple ) -> Dict:
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 198.1318 ) < 1E-2
        assert abs(result_mean.item() - 0.2580 ) < 1E-3
    def __magic_name__( self :Tuple ) -> List[Any]:
        sample = self.full_loop(prediction_type='''v_prediction''' )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 67.3986 ) < 1E-2
        assert abs(result_mean.item() - 0.0878 ) < 1E-3
    def __magic_name__( self :Tuple ) -> List[str]:
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 230.0399 ) < 1E-2
        assert abs(result_mean.item() - 0.2995 ) < 1E-3
    def __magic_name__( self :int ) -> str:
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 186.9482 ) < 1E-2
        assert abs(result_mean.item() - 0.2434 ) < 1E-3
| 696 |
def _UpperCamelCase ( principal , rate_per_annum , years_to_repay ):
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(lowercase__ , lowercase__ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
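# Worked example (my addition; numbers are illustrative): a 25_000 loan at 8%
# a year repaid over 3 years, i.e. r = 0.08 / 12 per month and n = 36 payments.
#     _UpperCamelCase(25_000, 0.08, 3)   # -> ~783.4 per month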
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ['''speech''']
def __init__( self :Union[str, Any] , *lowerCAmelCase__ :Any , **lowerCAmelCase__ :Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ['''speech'''] )
class _lowercase ( metaclass=A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = ['''speech''']
def __init__( self :str , *lowerCAmelCase__ :List[str] , **lowerCAmelCase__ :Optional[int] ) -> Dict:
requires_backends(self , ['''speech'''] )
| 696 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCamelCase ( number ):
    factors = prime_factors(number )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
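# Spot checks (my addition), matching the definition of the Mobius function,
# assuming prime_factors() returns factors with multiplicity as imported above:
#     _UpperCamelCase(6)    # -> 1   (6 = 2 * 3: square-free, even factor count)
#     _UpperCamelCase(30)   # -> -1  (30 = 2 * 3 * 5: square-free, odd factor count)
#     _UpperCamelCase(12)   # -> 0   (12 = 2^2 * 3 is divisible by a square)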
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
from scipy.stats import pearsonr
import datasets
__lowerCAmelCase : str ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__lowerCAmelCase : Tuple ='\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
__lowerCAmelCase : Optional[int] ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Optional[int] ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def __magic_name__( self :Tuple , predictions , references , return_pvalue=False ) -> int:
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 696 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase : int ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __magic_name__( cls :Optional[Any] ) -> Any:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def __magic_name__( cls :List[str] ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
    def __magic_name__( self :Any ) -> Union[str, Any]:
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub('''test-config''' , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f'''{USER}/test-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-config''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id='''test-config''' , push_to_hub=True , use_auth_token=self._token )
            new_config = BertConfig.from_pretrained(f'''{USER}/test-config''' )
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v , getattr(new_config , k ) )
    def __magic_name__( self :int ) -> Optional[Any]:
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained('''valid_org/test-config-org''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-config-org''' , push_to_hub=True , use_auth_token=self._token )
            new_config = BertConfig.from_pretrained('''valid_org/test-config-org''' )
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v , getattr(new_config , k ) )
def __magic_name__( self :Dict ) -> Optional[int]:
CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
        new_config = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=True )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    def __magic_name__( self :List[str] ) -> Dict:
        c = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + '''foo'''  # str
        c.update_from_string(
            f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
        self.assertEqual(n_embd , c.n_embd , '''mismatch for key: n_embd''' )
        self.assertEqual(resid_pdrop , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
        self.assertEqual(summary_type , c.summary_type , '''mismatch for key: summary_type''' )
    def __magic_name__( self :Dict ) -> str:
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                '''The following keys are set with the default values in'''
                ''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
                f''' {', '.join(keys_with_defaults )}.''' )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
with self.assertRaises(lowerCAmelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
        self.assertIsNotNone(config )
    def __magic_name__( self :List[Any] ) -> Optional[Any]:
        # A mock response for an HTTP head request to emulate server down
        # (the mocked attribute names below are reconstructed, not in the original dump)
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
            _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            # This check ensures we did call the fake head request
            mock_head.assert_called()
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
__SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
    def __magic_name__( self :str ) -> List[str]:
        configuration = AutoConfig.from_pretrained('''bert-base-cased''' )
        # NOTE: the `configuration_files` attribute name is reconstructed; it lists candidate config files.
        configuration.configuration_files = ['''config.4.0.0.json''']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , '''config.4.0.0.json''' ) , '''w''' ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['''config.42.0.0.json''']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir , '''config.4.0.0.json''' ) , os.path.join(tmp_dir , '''config.42.0.0.json''' ) )
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 768 )
    def __magic_name__( self :List[str] ) -> Union[str, Any]:
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = '''hf-internal-testing/test-two-configs'''
        import transformers as new_transformers
        # NOTE: the patched attribute below is reconstructed; it fakes the installed version.
        new_transformers.configuration_utils.__version__ = '''v4.0.0'''
        new_configuration , kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo , return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = '''v3.0.0'''
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
        self.assertEqual(old_configuration.hidden_size , 768 )
| 696 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : Any =subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : str =subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
__lowerCAmelCase : Any ='|'.join(sys.argv[1:])
__lowerCAmelCase : str =re.compile(rf"""^({joined_dirs}).*?\.py$""")
__lowerCAmelCase : int =[x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 696 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Any ={
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
def get_yolos_config( yolos_name ):
    config = YolosConfig()
    # size of the architecture
    # (attribute targets below are reconstructed from the YolosConfig defaults, not in the original dump)
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def read_in_q_k_v( state_dict , config , base_model = False ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        # (the target key names below are reconstructed and follow the standard HF ViT naming)
        prefix = f'''vit.encoder.layer.{i}.attention.attention'''
        state_dict[f'''{prefix}.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''{prefix}.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}.value.weight'''] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f'''{prefix}.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key( name ):
    if "backbone" in name:
        name = name.replace('''backbone''' , '''vit''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "det_token" in name:
        name = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
    if "mid_pos_embed" in name:
        name = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "class_embed" in name:
        name = name.replace('''class_embed''' , '''class_labels_classifier''' )
    if "bbox_embed" in name:
        name = name.replace('''bbox_embed''' , '''bbox_predictor''' )
    if "vit.norm" in name:
        name = name.replace('''vit.norm''' , '''vit.layernorm''' )
    return name
def convert_state_dict( orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            # (target key names reconstructed; they follow the rename_key() conventions above)
            prefix = f'''vit.encoder.layer.{layer_num}.attention.attention'''
            if "weight" in key:
                orig_state_dict[f'''{prefix}.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''{prefix}.key.weight'''] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'''{prefix}.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''{prefix}.query.bias'''] = val[:dim]
                orig_state_dict[f'''{prefix}.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''{prefix}.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ):
__SCREAMING_SNAKE_CASE : Tuple = get_yolos_config(lowercase__ )
# load original state_dict
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(lowercase__ , map_location='''cpu''' )['''model''']
# load 🤗 model
__SCREAMING_SNAKE_CASE : Union[str, Any] = YolosForObjectDetection(lowercase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[int] = convert_state_dict(lowercase__ , lowercase__ )
model.load_state_dict(lowercase__ )
# Check outputs on an image, prepared by YolosImageProcessor
__SCREAMING_SNAKE_CASE : Dict = 800 if yolos_name != '''yolos_ti''' else 512
__SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='''coco_detection''' , size=lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
__SCREAMING_SNAKE_CASE : Tuple = model(**lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.logits, outputs.pred_boxes
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = None, None
if yolos_name == "yolos_ti":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
__SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , lowercase__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowercase__ , atol=1e-4 )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase__ )
if push_to_hub:
__SCREAMING_SNAKE_CASE : int = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = model_mapping[yolos_name]
image_processor.push_to_hub(lowercase__ , organization='''hustvl''' )
model.push_to_hub(lowercase__ , organization='''hustvl''' )
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
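
# A hypothetical invocation of the conversion script above (the script filename
# and local paths are placeholders; the .pth checkpoint must be obtained
# separately from the original YOLOS release):
#
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small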
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    """Configuration class to store the configuration of a SwitchTransformers model."""

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32_128,
        d_model=768,
        d_kv=64,
        d_ff=2_048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us after how many encoder layers we have to place a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us after how many decoder layers we have to place a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
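
# A minimal usage sketch (the layer counts are illustrative): the sparse step is
# the stride between mixture-of-experts layers.
#
#   config = SwitchTransformersConfig(num_layers=6, num_sparse_encoder_layers=2)
#   config.encoder_sparse_step  # -> 3, i.e. every third encoder layer is sparse
#   config.dense_act_fn         # -> "relu"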
| 696 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    # Accept real booleans (e.g. dataclass defaults) as well as the usual
    # truthy/falsy strings coming from the command line.
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
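
# For example: string_to_bool(True) -> True, string_to_bool("YES") -> True,
# string_to_bool("0") -> False; any other string raises ArgumentTypeError.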
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    # Map the string form of each choice back to the original choice value, so
    # Enum members and other non-string choices survive the CLI round trip.
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
):
    if metadata is None:
        # Important: don't use a dict as the default parameter value in the
        # signature, because dicts are mutable and shared across calls.
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
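
# A usage sketch of the field helper above (the dataclass and field names are
# hypothetical); aliases/help land in `field.metadata` and are consumed by
# `_parse_dataclass_field` below:
#
#   @dataclasses.dataclass
#   class Args:
#       lr: float = HfArg(default=1e-4, aliases=["--learning_rate"], help="Learning rate.")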
class HfArgumentParser(ArgumentParser):
    """Argument parser that instantiates dataclasses from command-line args, .args files, dicts, JSON, or YAML."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
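
# A minimal end-to-end sketch (the dataclass and argv values are hypothetical):
#
#   @dataclasses.dataclass
#   class TrainingArgs:
#       learning_rate: float = 1e-4
#       fp16: bool = False
#
#   parser = HfArgumentParser(TrainingArgs)
#   (training_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-5", "--fp16"])
#   training_args.learning_rate, training_args.fp16  # -> 3e-05, True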
| 696 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines: the generated video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
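
# A hypothetical usage sketch of the guarded imports above (the model id and
# prompt are placeholders; requires `torch`, `transformers`, and a GPU):
#
#   import torch
#   from diffusers import TextToVideoSDPipeline
#
#   pipe = TextToVideoSDPipeline.from_pretrained(
#       "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
#   ).to("cuda")
#   frames = pipe("an astronaut riding a horse").frames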
| 696 | 1 |
def remove_digit(num: int) -> int:
    """
    Return the largest number that can be produced by deleting exactly one
    digit from the absolute value of `num`.

    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    # Build one copy of the digit list per position, then drop a different
    # digit from each copy.
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 696 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    # downloadgram is an unaffiliated third-party resolver; its response shape
    # (a JSON list with an `urls` array) may change or rate-limit at any time.
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 696 | 1 |