import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    """Load a test image of two cats from the COCO val2017 set."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    """Return the first five logits produced by the original checkpoint on the test image."""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    """Move the value stored under ``old`` to the key ``new``."""
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    """Map the original SwiftFormer checkpoint keys to the HF module names."""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Copy the original checkpoint's weights into an HF SwiftFormer model and verify the outputs."""
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    expected_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], expected_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
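# Example invocation (script name and checkpoint URL are illustrative, not verified values):
#   python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt https://example.com/swiftformer_xs.pth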
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    """Configuration for training a CodeParrot model."""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase : Optional[str] = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCamelCase : Optional[int] = field(
default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate for training.'} )
    lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate scheduler type.'} )
lowerCamelCase : Optional[int] = field(
default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCamelCase : Optional[int] = field(
default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} )
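# These argument dataclasses are meant to be consumed via transformers.HfArgumentParser
# (a minimal usage sketch; HfArgumentParser is the standard companion for such configs):
#   from transformers import HfArgumentParser
#   args = HfArgumentParser(TrainingArguments).parse_args_into_dataclasses()[0]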
@dataclass
class EvaluationArguments:
    """Configuration for evaluating a trained model."""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class HumanEvalArguments:
    """Configuration for running generation on the HumanEval benchmark."""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase : Optional[int] = field(
default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
        default='eval_results.json' , metadata={'help': 'File to save the evaluation results to.'} )
lowerCamelCase : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing (filtering and deduplicating) the dataset."""
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase : Optional[str] = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase : Optional[int] = field(
default=10_00_00 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[float] = field(
default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class TokenizerTrainingArguments:
    """Configuration for training a new tokenizer."""
lowerCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase : Optional[int] = field(
        default=32_768 , metadata={'help': 'Vocabulary size of the new tokenizer.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class PretokenizationArguments:
    """Configuration for pretokenizing the dataset."""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
    lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for pretokenization.'} )
@dataclass
class InitializationArguments:
    """Configuration for initializing a new model."""
lowerCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
    lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # extra dense head over the pooled output for 5-way answer-category classification
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
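# Flax models route their forward pass through ``module_class``; overriding it swaps in the
# custom 5-way classification head above while the pretrained question-answering weights load unchanged.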
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        # one-hot encode the integer labels before taking the soft cross-entropy
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
lowerCamelCase_ : str = "google/bigbird-roberta-base"
lowerCamelCase_ : int = 3_000
lowerCamelCase_ : int = 10_500
lowerCamelCase_ : int = 128
lowerCamelCase_ : int = 3
lowerCamelCase_ : int = 1
lowerCamelCase_ : int = 5
# tx_args
lowerCamelCase_ : float = 3E-5
lowerCamelCase_ : float = 0.0
lowerCamelCase_ : int = 20_000
lowerCamelCase_ : float = 0.0_0_9_5
lowerCamelCase_ : str = "bigbird-roberta-natural-questions"
lowerCamelCase_ : str = "training-expt"
lowerCamelCase_ : str = "data/nq-training.jsonl"
lowerCamelCase_ : str = "data/nq-validation.jsonl"
    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
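        # the effective batch size is the per-device batch size times the number of local JAX devices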
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs
    def __call__(self, batch):
        batch = self.collate_fn(batch)
        # shard the batch across the local devices for pmap
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch
    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
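        # e.g. with pad_id=0 and max_length=8: [5, 6, 7] -> ids [5, 6, 7, 0, 0, 0, 0, 0], mask [1, 1, 1, 0, 0, 0, 0, 0]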
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap , axis_name="""batch""" )
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
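# Under jax.pmap, lax.pmean averages the loss and gradients across all devices before the
# optimizer update, so every replica applies an identical step.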
@partial(jax.pmap , axis_name="""batch""" )
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
snake_case_ = model.params
snake_case_ = TrainState.create(
apply_fn=model.__call__ , params=lowerCamelCase , tx=lowerCamelCase , loss_fn=lowerCamelCase , )
if ckpt_dir is not None:
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = restore_checkpoint(lowerCamelCase , lowerCamelCase )
snake_case_ = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
snake_case_ , snake_case_ = build_tx(**lowerCamelCase )
snake_case_ = train_state.TrainState(
step=lowerCamelCase , apply_fn=model.__call__ , params=lowerCamelCase , tx=lowerCamelCase , opt_state=lowerCamelCase , )
snake_case_ = args
snake_case_ = data_collator
snake_case_ = lr
snake_case_ = params
snake_case_ = jax_utils.replicate(lowerCamelCase )
return state
    def train(self, state, tr_dataset, val_dataset):
snake_case_ = self.args
snake_case_ = len(lowerCamelCase ) // args.batch_size
snake_case_ = jax.random.PRNGKey(0 )
snake_case_ = jax.random.split(lowerCamelCase , jax.device_count() )
for epoch in range(args.max_epochs ):
            running_loss = jnp.array(0, dtype=jnp.float32)
snake_case_ = get_batched_dataset(lowerCamelCase , args.batch_size , seed=lowerCamelCase )
snake_case_ = 0
for batch in tqdm(lowerCamelCase , total=lowerCamelCase , desc=f'''Running EPOCH-{epoch}''' ):
snake_case_ = self.data_collator(lowerCamelCase )
snake_case_ , snake_case_ , snake_case_ = self.train_step_fn(lowerCamelCase , lowerCamelCase , **lowerCamelCase )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
if i % args.logging_steps == 0:
snake_case_ = jax_utils.unreplicate(state.step )
snake_case_ = running_loss.item() / i
snake_case_ = self.scheduler_fn(state_step - 1 )
snake_case_ = self.evaluate(lowerCamelCase , lowerCamelCase )
snake_case_ = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(lowerCamelCase ) )
self.logger.log(lowerCamelCase , commit=lowerCamelCase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=lowerCamelCase )
    def evaluate(self, state, dataset):
snake_case_ = get_batched_dataset(lowerCamelCase , self.args.batch_size )
snake_case_ = len(lowerCamelCase ) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
snake_case_ = 0
for batch in tqdm(lowerCamelCase , total=lowerCamelCase , desc="""Evaluating ... """ ):
snake_case_ = self.data_collator(lowerCamelCase )
snake_case_ = self.val_step_fn(lowerCamelCase , **lowerCamelCase )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
return running_loss / i
    def save_checkpoint(self, save_dir, state):
snake_case_ = jax_utils.unreplicate(lowerCamelCase )
print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... """ )
self.model_save_fn(lowerCamelCase , params=state.params )
with open(os.path.join(lowerCamelCase , """opt_state.msgpack""" ) , """wb""" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(lowerCamelCase , """args.joblib""" ) )
joblib.dump(self.data_collator , os.path.join(lowerCamelCase , """data_collator.joblib""" ) )
with open(os.path.join(lowerCamelCase , """training_state.json""" ) , """w""" ) as f:
json.dump({"""step""": state.step.item()} , lowerCamelCase )
print("""DONE""" )
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
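# Learning-rate schedule: linear warmup from init_lr up to lr, then linear decay toward zero
# over the remaining num_train_steps - warmup_steps steps.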
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # decay everything except biases and LayerNorm scales
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function, unet, scheduler, env) -> None:
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env

        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    # scale the value gradient by the posterior std so guidance tracks the noise level
                    grad = model_std * grad

                # zero out the guidance for the final low-noise steps
                grad[timesteps < 2] = 0
                x = x.detach()
                # gradient ascent on the value function nudges trajectories toward higher return
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
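# Typical transformers lazy-import layout: the import structure is declared up front and
# _LazyModule defers the heavy torch imports until an attribute is first accessed.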
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
a_ = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
    fail_training_script_args = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # `_convert_nargs_to_dict` should parse the script args into correctly typed values.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
"""simple docstring"""
from collections import defaultdict
from math import gcd
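# Euclid's formula: for coprime m > n of opposite parity, (m^2 - n^2, 2mn, m^2 + n^2) is a
# primitive Pythagorean triple with perimeter 2m(m + n); its multiples generate all triples.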
def solution(limit: int = 1_500_000) -> int:
    """Count the perimeters up to the limit that are produced by exactly one integer right triangle."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__( self : List[str] ,a__ : int = 1 ,a__ : int = 100 ,a__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,a__ : Optional[float] = None ,a__ : bool = True ,) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
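        # every up block doubles the temporal resolution, so the sample length must be a multiple of 2**len(up_blocks)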
if sample_size < 3 * down_scale_factor:
raise ValueError(
F'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'
F' {3 * down_scale_factor / self.unet.config.sample_rate}.')
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be"
                f" handled by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after"
                " the denoising process."
            )
        sample_size = int(sample_size)
        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=a__)
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
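# Project Euler 33: exactly four non-trivial two-digit fractions, e.g. 49/98 = 4/8, keep their
# value when a shared digit is naively "cancelled"; the answer is the denominator of their product.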
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(max_digits: int = 2) -> int:
    """Multiply the fractions together and return the denominator of the product in lowest terms."""
    result = 1.0
    for fraction in fraction_list(max_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
    'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_transfo_xl'] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_transfo_xl'] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
def lowerCamelCase_ ( ) ->str:
"""simple docstring"""
for n in range(1 , 1_00_00_00 ):
yield n * (n + 1) // 2
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : Any = 2
while i * i <= n:
__UpperCAmelCase : Tuple = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def lowerCamelCase_ ( ) ->Optional[int]:
"""simple docstring"""
return next(i for i in triangle_number_generator() if count_divisors(UpperCAmelCase_ ) > 5_00 )
if __name__ == "__main__":
    print(solution())
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {'height': 20, 'width': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
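    # do_reduce_labels maps the ADE20k background class (0) to the ignore index 255 and shifts the other ids down by one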
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')

    image = Image.open(dataset[0]['file'])
    map = Image.open(dataset[1]['file'])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')

    image1 = Image.open(ds[0]['file'])
    map1 = Image.open(ds[1]['file'])
    image2 = Image.open(ds[2]['file'])
    map2 = Image.open(ds[3]['file'])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , 'do_center_crop' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , 'center_crop' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , 'image_std' ) )
    def test_image_processor_from_dict_with_kwargs(self):
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 20, 'width': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
self.assertEqual(image_processor.do_reduce_labels , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
self.assertEqual(image_processor.do_reduce_labels , SCREAMING_SNAKE_CASE )
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_numpy(self):
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_pytorch(self):
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_segmentation_maps(self):
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = []
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , maps[0] , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test not batched input (PIL images)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE_ = image_processing(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched input (PIL images)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_batch_inputs()
SCREAMING_SNAKE_CASE_ = image_processing(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
    def test_reduce_labels(self):
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE_ = image_processing(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 150 )
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = image_processing(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
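# The '*' wildcards in these maps stand for per-layer indices that get filled in during key renaming.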
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
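# Keys in the mapping tables above may contain a "*" wildcard standing for a
# layer index: e.g. "quantizer.vq.layers.*._codebook.embed" matches
# "quantizer.vq.layers.3._codebook.embed" and is renamed to
# "quantizer.layers.3.codebook.embed". The helpers below resolve the wildcard
# from the original key before copying each tensor.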
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the module tree to the sub-module/parameter addressed by `key`.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
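# Example: with ignore_keys=["encoder.model.*"], should_ignore returns True for
# "encoder.model.0.conv.conv.weight" (prefix match); with
# ignore_keys=["quantizer.*.embed_avg"], it returns True for any name that
# contains both "quantizer" and "embed_avg".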
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    # Both the 24 kHz and the 32 kHz checkpoints share the 24K layer mapping.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
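# Programmatic usage sketch (the checkpoint filename and output directory are
# placeholders, not files shipped with this script):
#
#     convert_checkpoint(
#         model_name="encodec_24khz",
#         checkpoint_path="encodec_24khz-d7cc33bc.th",
#         pytorch_dump_folder_path="./encodec_24khz_converted",
#     )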
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
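# Example CLI invocation (script name and paths here are illustrative):
#
#     python convert_encodec_checkpoint_to_pytorch.py \
#         --model encodec_24khz \
#         --checkpoint_path encodec_24khz-d7cc33bc.th \
#         --pytorch_dump_folder_path ./encodec_24khz_converted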
| 707 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place using slowsort, a deliberately
    inefficient "multiply and surrender" sorting algorithm."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        # Move the larger of the two sub-range maxima to the end of the range.
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
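# Usage sketch (slowsort sorts in place and returns None):
#
#     data = [5, 2, 4, 1]
#     slowsort(data)
#     assert data == [1, 2, 4, 5]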
| 29 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
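# The tester above only builds tiny configs/inputs and shape-checks the forward
# pass; the unittest class below wires it into the shared ModelTesterMixin /
# PipelineTesterMixin machinery from the common test files.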
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 686 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        # The MST is undirected, so accept the edge in either orientation.
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 686 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS: every edge weight must be 0 or 1."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    # A 0-weight edge keeps the distance unchanged, so its endpoint
                    # goes to the front of the deque instead of the back.
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
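# Usage sketch: build a small graph and query a 0-1 BFS shortest path.
#
#     g = AdjacencyList(5)
#     g.add_edge(0, 1, 0)
#     g.add_edge(1, 2, 1)
#     g.add_edge(2, 4, 1)
#     g.get_shortest_path(0, 4)  # -> 2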
| 425 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
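# Each create_and_check_* helper above instantiates one DistilBert task head on
# the tiny config and asserts output shapes; the unittest class below exposes
# them as individual test cases through the common-test mixins.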
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 425 | 1 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    """Disable gradient updates for every parameter of ``module``."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, warning about flaky MPS support."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Display ``image`` with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current wall-clock time formatted as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
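# Usage sketch (``model`` stands for any torch.nn.Module you want to freeze):
#
#     device = get_device()
#     freeze_params(model)    # model's parameters no longer receive gradients
#     print(get_timestamp())  # e.g. "14:03:59"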
| 428 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking helper: try every knight move from ``pos`` and recurse."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            # Undo the move before trying the next candidate square.
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the open knight's tour problem on an n x n board."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
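# Example: open_knight_tour(5) returns a 5x5 board whose entries 1..25 trace the
# knight's path, while open_knight_tour(2) raises ValueError since no tour
# exists on a 2x2 board.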
| 43 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 704 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
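# This module follows the transformers lazy-import pattern: `_import_structure`
# maps submodule names to the symbols they export, and at the bottom of the
# file the module object is replaced by a `_LazyModule`, so heavy backends
# (torch / TensorFlow / Flax) are only imported when actually accessed.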
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level() -> int:
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the default level;
    otherwise fall back to ``_default_log_level``.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict() -> dict:
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """
    Return a logger with the specified name. Not meant to be accessed directly unless you are writing a custom
    transformers module.
    """
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info() -> None:
    """Set the verbosity to the INFO level."""
    return set_verbosity(INFO)


def set_verbosity_warning() -> None:
    """Set the verbosity to the WARNING level."""
    return set_verbosity(WARNING)


def set_verbosity_debug() -> None:
    """Set the verbosity to the DEBUG level."""
    return set_verbosity(DEBUG)


def set_verbosity_error() -> None:
    """Set the verbosity to the ERROR level."""
    return set_verbosity(ERROR)
def _lowercase ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def _lowercase ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def _lowercase ( UpperCamelCase_ ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(UpperCamelCase_ )
def _lowercase ( UpperCamelCase_ ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(UpperCamelCase_ )
def _lowercase ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
SCREAMING_SNAKE_CASE__ = False
def _lowercase ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
SCREAMING_SNAKE_CASE__ = True
def _lowercase ( ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = _get_library_root_logger().handlers
for handler in handlers:
SCREAMING_SNAKE_CASE__ = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(UpperCamelCase_ )
def _lowercase ( ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(UpperCamelCase_ )
def _lowercase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , UpperCamelCase_ )
if no_advisory_warnings:
return
self.warning(*UpperCamelCase_ , **UpperCamelCase_ )
__snake_case = warning_advice
@functools.lru_cache(UpperCamelCase_ )
def _lowercase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> int:
'''simple docstring'''
self.warning(*UpperCamelCase_ , **UpperCamelCase_ )
__snake_case = warning_once
class lowercase__ :
def __init__( self : Any , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[str] ): # pylint: disable=unused-argument
SCREAMING_SNAKE_CASE__ = args[0] if args else None
def __iter__( self : Optional[int] ):
return iter(self._iterator )
def __getattr__( self : Dict , UpperCAmelCase_ : Tuple ):
def empty_fn(*UpperCAmelCase_ : Any , **UpperCAmelCase_ : int ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Optional[Any] ):
return self
def __exit__( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ):
return
class lowercase__ :
def __call__( self : int , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Any ):
if _tqdm_active:
return tqdm_lib.tqdm(*_snake_case , **_snake_case )
else:
return EmptyTqdm(*_snake_case , **_snake_case )
def A_ ( self : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE__ = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_snake_case , **_snake_case )
def A_ ( self : List[str] ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__snake_case = _tqdm_cls()
def _lowercase ( ) -> bool:
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def _lowercase ( ) -> int:
'''simple docstring'''
global _tqdm_active
SCREAMING_SNAKE_CASE__ = True
hf_hub_utils.enable_progress_bars()
def _lowercase ( ) -> Optional[Any]:
'''simple docstring'''
global _tqdm_active
SCREAMING_SNAKE_CASE__ = False
hf_hub_utils.disable_progress_bars()
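# Usage sketch (illustrative, not part of the original module; assumes this file is
# importable as `transformers.utils.logging`):
#
#   from transformers.utils import logging
#
#   logging.set_verbosity_info()
#   logger = logging.get_logger(__name__)
#   logger.info("visible at INFO level")
#   logger.warning_advice("silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set")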
def sum_of_divisors(input_num: int) -> int:
    """
    Return the sum of the proper divisors of a positive integer.

    >>> sum_of_divisors(28)
    28
    >>> sum_of_divisors(12)
    16
    """
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
    doctest.testmod()
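# Worked example (added for illustration): 28 is a perfect number, so the sum of its
# proper divisors (1 + 2 + 4 + 7 + 14) equals the number itself.
if __name__ == "__main__":
    assert sum_of_divisors(28) == 28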
"""simple docstring"""
import os
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase = "matrix.txt" ) -> int:
with open(os.path.join(os.path.dirname(__UpperCAmelCase ) , __UpperCAmelCase ) ) as in_file:
SCREAMING_SNAKE_CASE__ = in_file.read()
SCREAMING_SNAKE_CASE__ = [[int(__UpperCAmelCase ) for cell in row.split("," )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE__ = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE__ = len(grid[0] )
SCREAMING_SNAKE_CASE__ = [[0 for i in range(__UpperCAmelCase )] for j in range(__UpperCAmelCase )]
SCREAMING_SNAKE_CASE__ = grid[0][0]
for i in range(1 , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = grid[0][i] + dp[0][i - 1]
for i in range(1 , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = grid[i][0] + dp[i - 1][0]
for i in range(1 , __UpperCAmelCase ):
for j in range(1 , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'{solution() = }')
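# Illustration (added; `solution` itself reads the puzzle input file): the same
# right/down recurrence on a tiny in-memory grid. The two paths through
# [[1, 3], [2, 4]] cost 1 + 3 + 4 = 8 and 1 + 2 + 4 = 7, so the minimum is 7.
if __name__ == "__main__":
    tiny = [[1, 3], [2, 4]]
    dp = [[0] * 2 for _ in range(2)]
    dp[0][0] = tiny[0][0]
    dp[0][1] = tiny[0][1] + dp[0][0]
    dp[1][0] = tiny[1][0] + dp[0][0]
    dp[1][1] = tiny[1][1] + min(dp[0][1], dp[1][0])
    assert dp[1][1] == 7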
"""Check that each `__init__.py` defines the same objects in `_import_structure` and under TYPE_CHECKING."""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between the `_import_structure` objects and the `TYPE_CHECKING` objects of an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if at least one is inconsistent."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check that every submodule appears in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # Only build a default scheduler when the caller did not supply one.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a positive integer whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""")
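# Illustration (added): the sequence of ugly numbers begins
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 10th ugly number is 12.
if __name__ == "__main__":
    assert ugly_numbers(10) == 12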
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
"""Donut Swin Transformer model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
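# Worked example (added for illustration): with the defaults above there are
# len(depths) = 4 stages, so the final channel dimension is
# embed_dim * 2 ** (4 - 1) = 96 * 8 = 768, which is the value `hidden_size` takes.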
"""Compute the final price of an item once a tax rate is applied."""


def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the given price with the tax rate applied."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
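# Worked example (added): a 100.00 price at a 25% tax rate comes to
# 100 * (1 + 0.25) = 125.00, matching the first print above.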
"""Training utilities: a seeding helper and an exponential moving average (EMA) of model weights."""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Set the seed in `random`, `numpy` and `torch` for reproducible behavior."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available


class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the current averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move internal buffers of the ExponentialMovingAverage to `device`."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return a dictionary containing the whole state of the EMA, for checkpointing."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Save the current parameters so they can be restored later with `restore`."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore the parameters stored with the `store` method."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Load the EMA state, validating every field of the given `state_dict`."""
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
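# Typical usage sketch (illustrative, not from the original file):
#
#   ema = EMAModel(model.parameters(), decay=0.9999)
#   for batch in dataloader:          # hypothetical training loop
#       ...                           # forward / backward / optimizer.step()
#       ema.step(model.parameters())  # update the shadow weights
#
#   ema.store(model.parameters())     # stash the raw weights
#   ema.copy_to(model.parameters())   # evaluate with the EMA weights
#   ...                               # run evaluation
#   ema.restore(model.parameters())   # put the raw weights back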
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if isinstance(_lowerCamelCase , np.ndarray ):
return list(tensor.shape )
_lowerCAmelCase : Optional[Any] = tf.shape(_lowerCamelCase )
if tensor.shape == tf.TensorShape(_lowerCamelCase ):
return dynamic
_lowerCAmelCase : List[Any] = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(_lowerCamelCase )]
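# Demonstration (a sketch): in eager mode every dimension is static, so
#
#     t = tf.zeros((2, 3))
#     shape_list(t)  # -> [2, 3]
#
# Inside a traced tf.function where e.g. the batch axis is unknown, the static
# entry for that axis is None and the helper substitutes the matching tf.shape()
# scalar, returning a mixed list like [<dynamic batch>, 3].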
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ):
'''simple docstring'''
return tf.nn.softmax(logits=logits + 1e-9 , axis=_lowerCamelCase , name=_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1e-5 , _lowerCamelCase=-1 ):
'''simple docstring'''
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
# Get mean and variance on the axis to be normalized
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = tf.nn.moments(_lowerCamelCase , axes=[axis] , keepdims=_lowerCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
_lowerCAmelCase : int = [1] * inputs.shape.rank
_lowerCAmelCase : Tuple = shape_list(_lowerCamelCase )[axis]
_lowerCAmelCase : List[str] = tf.reshape(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Dict = tf.reshape(_lowerCamelCase , _lowerCamelCase )
# Compute layer normalization using the batch_normalization
# function.
_lowerCAmelCase : Tuple = tf.nn.batch_normalization(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , offset=_lowerCamelCase , scale=_lowerCamelCase , variance_epsilon=_lowerCamelCase , )
return outputs
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=-1 ):
'''simple docstring'''
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
_lowerCAmelCase : Optional[Any] = tf.shape(_lowerCamelCase )
_lowerCAmelCase : int = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
_lowerCAmelCase : int = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(_lowerCamelCase , _lowerCamelCase )
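# Example (sketch), mirroring torch.flatten semantics: for an input of shape
# (2, 3, 4) with start_dim=1 and the default end_dim=-1, the sizes at positions
# 1..2 are multiplied into a single axis (3 * 4 = 12) and the result has shape
# (2, 12); start_dim == end_dim returns the input unchanged.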
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , tf.Tensor ):
_lowerCAmelCase : Optional[int] = tf.convert_to_tensor(_lowerCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
_lowerCAmelCase : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
_lowerCAmelCase : Optional[int] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
_lowerCAmelCase : int = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
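# Worked example (sketch): for a float32 mask row [1, 1, 0], broadcast to
# [:, None, None, :], the expression (1 - mask) * dtype.min maps keep positions
# to 0.0 and masked positions to the most negative representable float, so
# adding the result to attention scores drives masked logits to -inf before
# the softmax.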
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "input_ids" ):
'''simple docstring'''
tf.debugging.assert_less(
_lowerCamelCase , tf.cast(_lowerCamelCase , dtype=tensor.dtype ) , message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_lowerCamelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
_lowerCAmelCase : Tuple = [x for x in data if len(_lowerCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
_lowerCAmelCase : Tuple = np.asarray(_lowerCamelCase )
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : List[Any] = np.array_split(_lowerCamelCase , _lowerCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
_lowerCAmelCase : Tuple = np.array_split(_lowerCamelCase , _lowerCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_lowerCamelCase ):
_lowerCAmelCase : str = chunk_data
else:
_lowerCAmelCase : Dict = data
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if name in group.attrs:
_lowerCAmelCase : int = [n.decode('utf8' ) if hasattr(_lowerCamelCase , 'decode' ) else n for n in group.attrs[name]]
else:
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Any = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(_lowerCamelCase , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
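# Example (sketch): with the 64512-byte HDF5 object header limit above, a
# ~150 kB list of layer names is saved as chunked attributes "weight_names0",
# "weight_names1", "weight_names2"; this loader walks the "%s%d" suffixes until
# a chunk is missing and concatenates the pieces back into a single list.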
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
def _expand_single_ad_tensor(_lowerCamelCase ):
if isinstance(_lowerCamelCase , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(_lowerCamelCase , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , _lowerCamelCase )
| 259 | 1 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = [2, 1, 2, -1]
__SCREAMING_SNAKE_CASE : Optional[Any] = [1, 2, 3, 4]
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = len(self.first_signal )
__SCREAMING_SNAKE_CASE : int = len(self.second_signal )
__SCREAMING_SNAKE_CASE : Tuple = max(_A , _A )
# create a zero matrix of max_length x max_length
__SCREAMING_SNAKE_CASE : List[str] = [[0] * max_length for i in range(_A )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(_A ):
__SCREAMING_SNAKE_CASE : Any = deque(self.second_signal )
rotated_signal.rotate(_A )
for j, item in enumerate(_A ):
matrix[i][j] += item
# multiply the matrix with the first signal
__SCREAMING_SNAKE_CASE : str = np.matmul(np.transpose(_A ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(_A , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
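# Worked example (sketch of the intended algorithm): for first_signal
# [2, 1, 2, -1] and second_signal [1, 2, 3, 4], the circular convolution
# y[n] = sum_k x1[k] * x2[(n - k) % N] evaluates to [10.0, 10.0, 6.0, 14.0].
# The class realizes this by building the circulant matrix of the second signal
# (each row a rotation of it) and multiplying that matrix with the first signal.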
| 716 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , _A : Optional[int] , _A : Tuple=7 , _A : Optional[int]=3 , _A : Optional[Any]=18 , _A : Dict=30 , _A : str=400 , _A : Optional[int]=True , _A : str=None , _A : str=True , _A : str=None , _A : List[str]=True , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = size if size is not None else {'''shortest_edge''': 20}
__SCREAMING_SNAKE_CASE : Optional[int] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__SCREAMING_SNAKE_CASE : List[str] = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = min_resolution
__SCREAMING_SNAKE_CASE : Tuple = max_resolution
__SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize
__SCREAMING_SNAKE_CASE : int = size
__SCREAMING_SNAKE_CASE : Optional[Any] = do_center_crop
__SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size
__SCREAMING_SNAKE_CASE : Optional[int] = do_flip_channel_order
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = MobileViTImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
self.assertTrue(hasattr(_A , '''center_crop''' ) )
self.assertTrue(hasattr(_A , '''do_flip_channel_order''' ) )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 131 | 0 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
lowerCamelCase__ = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
lowerCamelCase__ = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
lowerCamelCase__ = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token") , id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token") , id="sequence") , id="references"),
}) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Dict=4 , lowercase_ : int=False) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = compute_bleu(
reference_corpus=lowercase_ , translation_corpus=lowercase_ , max_order=lowercase_ , smooth=lowercase_)
((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
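# Sketch (standard corpus-level BLEU, stated as an assumption rather than this
# module's exact internals): the score combines clipped n-gram precisions p_n
# with a brevity penalty BP = exp(1 - r / c) when the candidate length c is
# below the reference length r (else BP = 1):
#
#     import math
#     def brevity_penalty(c, r):
#         return 1.0 if c >= r else (0.0 if c == 0 else math.exp(1 - r / c))
#     # BLEU = BP * exp(sum(log(p_n) for n in 1..max_order) / max_order)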
 | 547 | 
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 547 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__A =logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'The column name of the images in the files.'} )
lowerCAmelCase__ = field(default=snake_case_ , metadata={'help': 'A folder containing the training data.'} )
lowerCAmelCase__ = field(default=snake_case_ , metadata={'help': 'A folder containing the validation data.'} )
lowerCAmelCase__ = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = {}
if self.train_dir is not None:
lowerCamelCase_ = self.train_dir
if self.validation_dir is not None:
lowerCamelCase_ = self.validation_dir
lowerCamelCase_ = data_files if data_files else None
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
lowerCAmelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase__ = field(default=snake_case_ , metadata={'help': 'Name or path of preprocessor config.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCAmelCase__ = field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = field(
default=1e-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = torch.stack([example["pixel_values"] for example in examples] )
return {"pixel_values": pixel_values}
def lowerCamelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mae" , lowerCamelCase__ , lowerCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase__ )
transformers.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
lowerCamelCase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowerCamelCase_ = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowerCamelCase__ ) and data_args.train_val_split > 0.0:
lowerCamelCase_ = ds["train"].train_test_split(data_args.train_val_split )
lowerCamelCase_ = split["train"]
lowerCamelCase_ = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCamelCase_ = ViTMAEConfig.from_pretrained(model_args.config_name , **lowerCamelCase__ )
elif model_args.model_name_or_path:
lowerCamelCase_ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowerCamelCase__ )
else:
lowerCamelCase_ = ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
lowerCamelCase_ = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowerCamelCase__ )
elif model_args.model_name_or_path:
lowerCamelCase_ = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowerCamelCase__ )
else:
lowerCamelCase_ = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
lowerCamelCase_ = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
lowerCamelCase_ = ViTMAEForPreTraining(lowerCamelCase__ )
if training_args.do_train:
lowerCamelCase_ = ds["train"].column_names
else:
lowerCamelCase_ = ds["validation"].column_names
if data_args.image_column_name is not None:
lowerCamelCase_ = data_args.image_column_name
elif "image" in column_names:
lowerCamelCase_ = "image"
elif "img" in column_names:
lowerCamelCase_ = "img"
else:
lowerCamelCase_ = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowerCamelCase_ = image_processor.size["shortest_edge"]
else:
lowerCamelCase_ = (image_processor.size["height"], image_processor.size["width"])
lowerCamelCase_ = Compose(
[
Lambda(lambda lowerCamelCase__ : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowerCamelCase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(lowerCamelCase__ ):
lowerCamelCase_ = [transforms(lowerCamelCase__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
lowerCamelCase_ = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowerCamelCase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
lowerCamelCase_ = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowerCamelCase__ )
# Compute absolute learning rate
lowerCamelCase_ = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
lowerCamelCase_ = training_args.base_learning_rate * total_train_batch_size / 2_5_6
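# Worked example (sketch): with base_learning_rate=1e-3, per-device batch size 32,
# gradient accumulation 2 and world size 4, total_train_batch_size is
# 32 * 2 * 4 = 256, so the absolute learning rate stays 1e-3; doubling the
# world size to 8 would scale it linearly to 2e-3, per the linear scaling rule.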
# Initialize our trainer
lowerCamelCase_ = Trainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , )
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCamelCase__ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCamelCase_ = trainer.evaluate()
trainer.log_metrics("eval" , lowerCamelCase__ )
trainer.save_metrics("eval" , lowerCamelCase__ )
# Write model card and (optionally) push to hub
lowerCamelCase_ = {
"tasks": "masked-auto-encoding",
"dataset": data_args.dataset_name,
"tags": ["masked-auto-encoding"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase__ )
else:
trainer.create_model_card(**lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 313 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
lowerCAmelCase__ = 'pixel_values'
lowerCAmelCase__ = False
lowerCAmelCase__ = TimmBackboneConfig
def __init__( self , lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(self , "timm" )
super().__init__(lowercase )
lowerCamelCase_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(f'backbone {config.backbone} is not supported by timm.' )
if hasattr(lowercase , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
lowerCamelCase_ = getattr(lowercase , "use_pretrained_backbone" , lowercase )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
lowerCamelCase_ = config.out_indices if getattr(lowercase , "out_indices" , lowercase ) is not None else (-1,)
lowerCamelCase_ = timm.create_model(
config.backbone , pretrained=lowercase , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowercase , **lowercase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowerCamelCase_ = self._backbone.return_layers
lowerCamelCase_ = {layer["module"]: str(lowercase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowercase )
@classmethod
def SCREAMING_SNAKE_CASE_( cls , lowercase , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
lowerCamelCase_ = kwargs.pop("config" , TimmBackboneConfig() )
lowerCamelCase_ = kwargs.pop("use_timm_backbone" , lowercase )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
lowerCamelCase_ = kwargs.pop("num_channels" , config.num_channels )
lowerCamelCase_ = kwargs.pop("features_only" , config.features_only )
lowerCamelCase_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
lowerCamelCase_ = kwargs.pop("out_indices" , config.out_indices )
lowerCamelCase_ = TimmBackboneConfig(
backbone=lowercase , num_channels=lowercase , features_only=lowercase , use_pretrained_backbone=lowercase , out_indices=lowercase , )
return super()._from_config(lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[Any]:
pass
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=None , lowercase=None , lowercase=None , **lowercase ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowerCamelCase_ = self._all_layers
lowerCamelCase_ = self._backbone(lowercase , **lowercase )
lowerCamelCase_ = self._return_layers
lowerCamelCase_ = tuple(hidden_states[i] for i in self.out_indices )
else:
lowerCamelCase_ = self._backbone(lowercase , **lowercase )
lowerCamelCase_ = None
lowerCamelCase_ = tuple(lowercase )
lowerCamelCase_ = tuple(lowercase ) if hidden_states is not None else None
if not return_dict:
lowerCamelCase_ = (feature_maps,)
if output_hidden_states:
lowerCamelCase_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowercase , hidden_states=lowercase , attentions=lowercase )
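# Usage sketch (assumption: this mirrors transformers' TimmBackbone API), built
# from the config fields consumed above:
#
#     config = TimmBackboneConfig(backbone="resnet50", num_channels=3,
#                                 features_only=True, use_pretrained_backbone=False,
#                                 out_indices=(1, 2, 3, 4))
#     model = TimmBackbone(config)
#     outputs = model(pixel_values)         # BackboneOutput by default
#     feature_maps = outputs.feature_maps   # one tensor per requested stage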
| 313 | 1 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a_ :Tuple = False
class lowercase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = pipe(
image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 35 |
def _a ( a :list ) -> list:
if len(a ) < 2:
return collection
def circle_sort_util(a :list , a :int , a :int ) -> bool:
a = False
if low == high:
return swapped
a = low
a = high
while left < right:
if collection[left] > collection[right]:
a , a = (
collection[right],
collection[left],
)
a = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
a , a = (
collection[right + 1],
collection[left],
)
a = True
a = low + int((high - low) / 2 )
a = circle_sort_util(a , a , a )
a = circle_sort_util(a , mid + 1 , a )
return swapped or left_swap or right_swap
a = True
while is_not_sorted is True:
a = circle_sort_util(a , 0 , len(a ) - 1 )
return collection
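# Usage example (sketch): circle_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5].
# Each pass compares elements mirrored around the centre of the current range
# (left vs. right moving inward), swaps out-of-order pairs, then recurses on
# both halves; whole-array passes repeat until one completes with no swap.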
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 117 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase : int = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Union[str, Any] = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowercase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 392 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Any = tempfile.mkdtemp()
a__ : Tuple = 5
# Realm tok
a__ : List[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
a__ : Any = os.path.join(self.tmpdirname , 'realm_tokenizer')
os.makedirs(lowercase , exist_ok=lowercase)
a__ : int = os.path.join(lowercase , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
a__ : List[str] = os.path.join(self.tmpdirname , 'realm_block_records')
os.makedirs(lowercase , exist_ok=lowercase)
def __lowercase ( self) -> RealmTokenizer:
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer'))
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : int = RealmConfig(num_block_records=self.num_block_records)
return config
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : Tuple = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
})
return dataset
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : Optional[int] = np.array(
[
b'This is the first record',
b'This is the second record',
b'This is the third record',
b'This is the fourth record',
b'This is the fifth record',
b'This is a longer longer longer record',
] , dtype=lowercase , )
return block_records
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : Dict = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : List[Any] = self.get_config()
a__ : Tuple = self.get_dummy_retriever()
a__ : Tuple = retriever.tokenizer
a__ : str = np.array([0, 3] , dtype='long')
a__ : Optional[int] = tokenizer(['Test question']).input_ids
a__ : List[str] = tokenizer(
['the fourth'] , add_special_tokens=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , ).input_ids
a__ : str = config.reader_seq_len
a__ , a__ , a__ , a__ : int = retriever(
lowercase , lowercase , answer_ids=lowercase , max_length=lowercase , return_tensors='np')
self.assertEqual(len(lowercase) , 2)
self.assertEqual(len(lowercase) , 2)
self.assertEqual(len(lowercase) , 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10))
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10))
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10))
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : List[str] = self.get_config()
a__ : Union[str, Any] = self.get_dummy_retriever()
a__ : List[Any] = retriever.tokenizer
a__ : Any = np.array([0, 3, 5] , dtype='long')
a__ : Tuple = tokenizer(['Test question']).input_ids
a__ : Optional[Any] = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , ).input_ids
a__ : Dict = config.reader_seq_len
a__ , a__ , a__ , a__ : Dict = retriever(
lowercase , lowercase , answer_ids=lowercase , max_length=lowercase , return_tensors='np')
self.assertEqual([False, True, True] , lowercase)
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , lowercase)
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , lowercase)
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : Union[str, Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
# Test local path
a__ : Optional[int] = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
self.assertEqual(retriever.block_records[0] , b'This is the first record')
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
a__ : str = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records') , _REALM_BLOCK_RECORDS_FILENAME)
a__ : str = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
self.assertEqual(retriever.block_records[0] , b'This is the first record')
| 392 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A : str = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = ['''LayoutLMv2FeatureExtractor''']
A : Optional[int] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
A : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 128 |
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any]=13 , SCREAMING_SNAKE_CASE : Any=7 , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : List[str]=False , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : str=99 , SCREAMING_SNAKE_CASE : Tuple=32 , SCREAMING_SNAKE_CASE : Dict=5 , SCREAMING_SNAKE_CASE : Optional[int]=4 , SCREAMING_SNAKE_CASE : int=37 , SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Tuple=0.1 , SCREAMING_SNAKE_CASE : List[str]=512 , SCREAMING_SNAKE_CASE : Optional[int]=16 , SCREAMING_SNAKE_CASE : str=2 , SCREAMING_SNAKE_CASE : str=0.02 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : Dict=4 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
_A : Optional[int] = parent
_A : List[Any] = batch_size
_A : int = seq_length
_A : List[str] = is_training
_A : str = use_input_mask
_A : Any = use_token_type_ids
_A : List[str] = use_labels
_A : Optional[Any] = vocab_size
_A : Tuple = hidden_size
_A : Dict = num_hidden_layers
_A : int = num_attention_heads
_A : Union[str, Any] = intermediate_size
_A : str = hidden_act
_A : Tuple = hidden_dropout_prob
_A : Tuple = attention_probs_dropout_prob
_A : Tuple = max_position_embeddings
_A : Any = type_vocab_size
_A : Optional[int] = type_sequence_label_size
_A : Any = initializer_range
_A : Tuple = num_labels
_A : List[str] = num_choices
_A : Any = scope
def A ( self : Union[str, Any]):
_A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_A : Optional[Any] = None
if self.use_input_mask:
_A : str = random_attention_mask([self.batch_size, self.seq_length])
_A : Dict = None
if self.use_token_type_ids:
_A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_A : Optional[int] = None
_A : Tuple = None
_A : Optional[int] = None
if self.use_labels:
_A : str = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_A : int = ids_tensor([self.batch_size] , self.num_choices)
_A : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : List[Any]):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def A ( self : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int):
_A : str = BioGptModel(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE)
_A : Optional[int] = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A ( self : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str , ):
_A : int = BioGptForCausalLM(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : Optional[int] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A ( self : Optional[int] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , *SCREAMING_SNAKE_CASE : List[Any]):
_A : int = BioGptModel(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
# create attention mask
_A : Any = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE)
_A : List[Any] = self.seq_length // 2
_A : Any = 0
# first forward pass
_A , _A : Union[str, Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE).to_tuple()
# create hypothetical next token and extent to next_input_ids
_A : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
_A : Any = ids_tensor((1,) , SCREAMING_SNAKE_CASE).item() + 1
_A : List[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
_A : int = random_other_next_tokens
# append to next input_ids and attn_mask
_A : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
_A : Dict = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=SCREAMING_SNAKE_CASE)] , dim=1 , )
# get two different outputs
_A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE)['last_hidden_state']
_A : List[Any] = model(SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE)['last_hidden_state']
# select random slice
_A : List[Any] = ids_tensor((1,) , output_from_past.shape[-1]).item()
_A : str = output_from_no_past[:, -1, random_slice_idx].detach()
_A : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3))
def A ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] , *SCREAMING_SNAKE_CASE : List[Any]):
_A : int = BioGptModel(config=SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE).eval()
_A : Any = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE)
# first forward pass
_A : Tuple = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE)
_A , _A : str = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
_A : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size)
_A : List[str] = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and
_A : Any = torch.cat([input_ids, next_tokens] , dim=-1)
_A : Tuple = torch.cat([attention_mask, next_attn_mask] , dim=-1)
_A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE)['last_hidden_state']
_A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE)[
'last_hidden_state'
]
# select random slice
_A : Dict = ids_tensor((1,) , output_from_past.shape[-1]).item()
_A : int = output_from_no_past[:, -3:, random_slice_idx].detach()
_A : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3))
    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        # only the first three returned values are needed here; the remaining
        # returns (masks, labels, ...) are intentionally discarded
        config, input_ids, input_mask, *_ = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)
    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)
    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)
    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')

        tokenizer.padding_side = 'left'

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different-length sentences to test batching
        sentences = [
            'Hello, my dog is a little',
            'Today, I',
        ]

        inputs = tokenizer(sentences, return_tensors='pt', padding=True)
        input_ids = inputs['input_ids'].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs['attention_mask'].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors='pt').input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors='pt').input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            'Hello, my dog is a little bit bigger than a little bit.',
            'Today, I have a good idea of how to use the information',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
@slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_biogpt(self):
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer('COVID-19 is', return_tensors='pt').to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
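# --- Added example (not part of the original test file) ----------------------
# A minimal standalone sketch of the cache-consistency check the tests above
# perform: hidden states computed with `past_key_values` must match a full
# forward pass. Assumes the `microsoft/biogpt` checkpoint can be downloaded.
if __name__ == "__main__":
    _model = BioGptModel.from_pretrained("microsoft/biogpt").eval()
    _tok = BioGptTokenizer.from_pretrained("microsoft/biogpt")
    _ids = _tok("COVID-19 is", return_tensors="pt").input_ids
    with torch.no_grad():
        # run all but the last token, keeping the key/value cache
        _prefix = _model(_ids[:, :-1], use_cache=True)
        # feed only the last token plus the cache
        _cached = _model(_ids[:, -1:], past_key_values=_prefix.past_key_values).last_hidden_state
        # reference: one full forward pass
        _full = _model(_ids).last_hidden_state
    print(torch.allclose(_full[:, -1], _cached[:, 0], atol=1e-3))  # expect True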
| 128 | 1 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x):
    """Convert bytes to megabytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.", )
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.", )
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
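# --- Added example (not part of the original script) --------------------------
# Hedged standalone sketch of how the TorchTracemalloc context manager above
# can be used outside the training loop; requires a CUDA device to do anything.
def _tracemalloc_demo():
    if not torch.cuda.is_available():
        return
    with TorchTracemalloc() as tm:
        a = torch.randn(1024, 1024, device="cuda")
        b = a @ a  # allocate some temporary memory on the GPU
    print(f"used {tm.used} MB, peaked {tm.peaked} MB")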
| 701 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
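# --- Added example (not part of the original module) --------------------------
# Hedged usage sketch of the converters above; assumes NCHW float tensors in
# [-1, 1], which is the convention the rescaling in pt_to_pil implies.
if __name__ == "__main__":
    import torch

    fake_batch = torch.rand(2, 3, 8, 8) * 2 - 1  # stand-in for model output
    for img in pt_to_pil(fake_batch):
        print(img.size, img.mode)  # (8, 8) RGB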
| 164 | 0 |
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data', )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file', )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file', )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w') as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')
if __name__ == "__main__":
main()
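# --- Added note (not part of the original script) ------------------------------
# Hedged sketch of the record shape the parser above expects; the field names
# come from the code, the values are illustrative only.
def _demo_record():
    record = {"question": "who wrote hamlet?", "positive_ctxs": [{"title": "Hamlet"}, {"title": "William Shakespeare"}]}
    print(record["question"])                                            # evaluation-set line
    print("\t".join(context["title"] for context in record["positive_ctxs"]))  # gold-data line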
| 105 |
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    """Recursively compute base ** exponent for non-negative exponents."""
    return base * power(base, exponent - 1) if exponent else 1
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f'''{base} to the power of {exponent} is {result}''')
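# --- Added sanity checks (not part of the original snippet) --------------------
# Easy to verify by hand; they exercise both the recursion and the negative-
# exponent handling shown in the __main__ block above.
assert power(2, 3) == 8
assert power(5, 0) == 1
assert 1 / power(2, 2) == 0.25  # what the __main__ block computes for exponent -2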
| 265 | 0 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    """Project Euler 21: sum of all amicable numbers under limit."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 714 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def UpperCamelCase ( self ):
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def UpperCamelCase ( self ):
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def UpperCamelCase ( self ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def UpperCamelCase ( self ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def UpperCamelCase ( self ):
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def UpperCamelCase ( self ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def UpperCamelCase ( self ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def UpperCamelCase ( self ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def UpperCamelCase ( self ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def UpperCamelCase ( self ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def UpperCamelCase ( self ):
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def UpperCamelCase ( self ):
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def UpperCamelCase ( self ):
pass
@unittest.skip("Safetensors is not supported by timm." )
def UpperCamelCase ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase ( self ):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 580 | 0 |
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
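# --- Added example (not part of the original module) ---------------------------
# Hedged usage sketch of the Nagel-Schreckenberg automaton above: one lane of
# 25 cells, a car every 4 cells starting at speed 2, five update steps.
if __name__ == "__main__":
    lanes = simulate(construct_highway(25, 4, 2), 5, 0.3, 5)
    for lane in lanes:
        print("".join("." if cell == -1 else str(cell) for cell in lane))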
| 239 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 272 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Sort the letters of a word so that anagrams share one signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """All known words sharing my_word's signature."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
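# --- Added sanity check (not part of the original script) ----------------------
# The signature trick above in one line: two words are anagrams iff their
# sorted letters match.
assert signature("listen") == signature("silent") == "eilnst"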
| 721 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = 'pt'
        self.framework_tf = 'tf'
    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)
    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = 'mock_framework'

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf = MagicMock(return_value=False)
        with patch('transformers.onnx.features.is_tf_available', mock_tf):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch = MagicMock(return_value=False)
        with patch('transformers.onnx.features.is_torch_available', mock_torch):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf = MagicMock(return_value=True)
        mock_torch = MagicMock(return_value=True)
        with patch('transformers.onnx.features.is_tf_available', mock_tf), patch(
            'transformers.onnx.features.is_torch_available', mock_torch):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf = MagicMock(return_value=False)
        mock_torch = MagicMock(return_value=False)
        with patch('transformers.onnx.features.is_tf_available', mock_tf), patch(
            'transformers.onnx.features.is_torch_available', mock_torch):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 144 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser('''Accelerate CLI tool''', usage='''accelerate <command> [<args>]''', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='''accelerate command helpers''')

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, '''func'''):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 534 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
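# --- Added sanity check (not part of the original module) ----------------------
# Hedged check of the cosine schedule above: one beta per step, all positive
# and capped at max_beta.
if __name__ == "__main__":
    _betas = betas_for_alpha_bar(10)
    assert _betas.shape == (10,)
    assert float(_betas.min()) > 0.0 and float(_betas.max()) <= 0.999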
class lowerCAmelCase__( __lowercase , __lowercase ):
'''simple docstring'''
__snake_case = [e.name for e in KarrasDiffusionSchedulers]
__snake_case = 2
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None ) -> Dict:
if schedule_timesteps is None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.timesteps
_SCREAMING_SNAKE_CASE : Tuple = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_SCREAMING_SNAKE_CASE : str = 1 if len(__lowerCamelCase ) > 1 else 0
else:
_SCREAMING_SNAKE_CASE : Dict = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
_SCREAMING_SNAKE_CASE : Dict = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase_ ( self ) -> List[str]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , ) -> torch.FloatTensor:
_SCREAMING_SNAKE_CASE : Tuple = self.index_for_timestep(__lowerCamelCase )
if self.state_in_first_order:
_SCREAMING_SNAKE_CASE : List[str] = self.sigmas[step_index]
else:
_SCREAMING_SNAKE_CASE : List[Any] = self.sigmas_interpol[step_index]
_SCREAMING_SNAKE_CASE : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None , ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[Any] = num_inference_steps
_SCREAMING_SNAKE_CASE : List[Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_SCREAMING_SNAKE_CASE : str = np.linspace(0 , num_train_timesteps - 1 , __lowerCamelCase , dtype=__lowerCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_SCREAMING_SNAKE_CASE : int = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_SCREAMING_SNAKE_CASE : Optional[Any] = (np.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(__lowerCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_SCREAMING_SNAKE_CASE : Optional[Any] = (np.arange(__lowerCamelCase , 0 , -step_ratio )).round().copy().astype(__lowerCamelCase )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(np.log(__lowerCamelCase ) ).to(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = np.interp(__lowerCamelCase , np.arange(0 , len(__lowerCamelCase ) ) , __lowerCamelCase )
        _SCREAMING_SNAKE_CASE : Optional[int] = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
_SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase )
# interpolate sigmas
_SCREAMING_SNAKE_CASE : str = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_SCREAMING_SNAKE_CASE : List[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_SCREAMING_SNAKE_CASE : Tuple = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__lowerCamelCase ).startswith("mps" ):
# mps does not support float64
            _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase , dtype=torch.float32 )
else:
_SCREAMING_SNAKE_CASE : str = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase )
# interpolate timesteps
_SCREAMING_SNAKE_CASE : Optional[int] = self.sigma_to_t(__lowerCamelCase ).to(__lowerCamelCase , dtype=timesteps.dtype )
_SCREAMING_SNAKE_CASE : List[str] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_SCREAMING_SNAKE_CASE : int = torch.cat([timesteps[:1], interleaved_timesteps] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_SCREAMING_SNAKE_CASE : Dict = defaultdict(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[str]:
# get log sigma
_SCREAMING_SNAKE_CASE : Optional[int] = sigma.log()
# get distribution
_SCREAMING_SNAKE_CASE : Union[str, Any] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_SCREAMING_SNAKE_CASE : Any = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_SCREAMING_SNAKE_CASE : int = low_idx + 1
_SCREAMING_SNAKE_CASE : Optional[int] = self.log_sigmas[low_idx]
_SCREAMING_SNAKE_CASE : Optional[Any] = self.log_sigmas[high_idx]
# interpolate sigmas
_SCREAMING_SNAKE_CASE : List[str] = (low - log_sigma) / (low - high)
_SCREAMING_SNAKE_CASE : Optional[Any] = w.clamp(0 , 1 )
# transform interpolation to time range
_SCREAMING_SNAKE_CASE : List[str] = (1 - w) * low_idx + w * high_idx
_SCREAMING_SNAKE_CASE : List[str] = t.view(sigma.shape )
return t
@property
def UpperCamelCase_ ( self ) -> List[Any]:
return self.sample is None
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True , ) -> Union[SchedulerOutput, Tuple]:
_SCREAMING_SNAKE_CASE : int = self.index_for_timestep(__lowerCamelCase )
# advance index counter by 1
_SCREAMING_SNAKE_CASE : int = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_SCREAMING_SNAKE_CASE : Tuple = self.sigmas[step_index]
_SCREAMING_SNAKE_CASE : Any = self.sigmas_interpol[step_index + 1]
_SCREAMING_SNAKE_CASE : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_SCREAMING_SNAKE_CASE : Any = self.sigmas[step_index - 1]
_SCREAMING_SNAKE_CASE : Tuple = self.sigmas_interpol[step_index]
_SCREAMING_SNAKE_CASE : Any = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_SCREAMING_SNAKE_CASE : Optional[Any] = 0
_SCREAMING_SNAKE_CASE : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_SCREAMING_SNAKE_CASE : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol
_SCREAMING_SNAKE_CASE : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_SCREAMING_SNAKE_CASE : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol
_SCREAMING_SNAKE_CASE : str = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_SCREAMING_SNAKE_CASE : Union[str, Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_SCREAMING_SNAKE_CASE : int = sigma_interpol - sigma_hat
# store for 2nd order step
_SCREAMING_SNAKE_CASE : Optional[int] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_SCREAMING_SNAKE_CASE : List[str] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_SCREAMING_SNAKE_CASE : str = sigma_next - sigma_hat
_SCREAMING_SNAKE_CASE : Any = self.sample
_SCREAMING_SNAKE_CASE : List[Any] = None
_SCREAMING_SNAKE_CASE : int = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__lowerCamelCase ):
# mps does not support float64
            _SCREAMING_SNAKE_CASE : Union[str, Any] = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            _SCREAMING_SNAKE_CASE : Dict = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
_SCREAMING_SNAKE_CASE : List[str] = self.timesteps.to(original_samples.device )
_SCREAMING_SNAKE_CASE : Union[str, Any] = timesteps.to(original_samples.device )
_SCREAMING_SNAKE_CASE : List[Any] = [self.index_for_timestep(__lowerCamelCase , __lowerCamelCase ) for t in timesteps]
_SCREAMING_SNAKE_CASE : Optional[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_SCREAMING_SNAKE_CASE : Optional[Any] = sigma.unsqueeze(-1 )
_SCREAMING_SNAKE_CASE : str = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> Optional[Any]:
        return self.config.num_train_timesteps
| 249 | 0 |
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True when every element of iterable is equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of n consecutive integers with n distinct prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Project Euler 47: first of n consecutive integers with n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
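# --- Added sanity checks (not part of the original solution) -------------------
# Small cases from the Project Euler 47 statement; cheap to run thanks to the
# lru_cache above.
assert solution(2) == 14   # 14 = 2*7 and 15 = 3*5
assert solution(3) == 644  # 644, 645, 646 each have three distinct prime factors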
| 494 |
"""simple docstring"""
import argparse
import os
import re
lowerCAmelCase_ = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCAmelCase_ = re.compile(r'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
lowerCAmelCase_ = re.compile(r'''\s*\(\s*"(\S[^"]+)"''')
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE = False ) -> Optional[Any]:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE,'r',encoding='utf-8' ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase = content.split('\n' )
_UpperCAmelCase = []
_UpperCAmelCase = 0
while line_idx < len(SCREAMING_SNAKE_CASE ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
_UpperCAmelCase = len(re.search(R'^(\s*)\S',lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(' ' * indent + '(' ):
new_lines.append(lines[line_idx] )
line_idx += 1
_UpperCAmelCase = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
_UpperCAmelCase = line_idx
while not lines[line_idx].startswith(' ' * indent + ')' ):
line_idx += 1
blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
_UpperCAmelCase = sorted(SCREAMING_SNAKE_CASE,key=lambda SCREAMING_SNAKE_CASE : _re_identifier.search(SCREAMING_SNAKE_CASE ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(SCREAMING_SNAKE_CASE,'w',encoding='utf-8' ) as f:
f.write('\n'.join(SCREAMING_SNAKE_CASE ) )
elif "\n".join(SCREAMING_SNAKE_CASE ) != content:
return True
def __lowerCamelCase ( SCREAMING_SNAKE_CASE = False ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = [os.path.join(SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE ) for f in os.listdir(SCREAMING_SNAKE_CASE ) if f.endswith('.py' )]
_UpperCAmelCase = [sort_auto_mapping(SCREAMING_SNAKE_CASE,overwrite=SCREAMING_SNAKE_CASE ) for fname in fnames]
if not overwrite and any(SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = [f for f, d in zip(SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(SCREAMING_SNAKE_CASE )}. Run `make style` to fix"""
' this.' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
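
# Typical invocations (added; how the repo wires this into `make style` /
# `make quality` is an assumption here):
#   python utils/sort_auto_mappings.py                # sort mappings in place
#   python utils/sort_auto_mappings.py --check_only   # only report unsorted files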
| 494 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
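    # Note (added): the checks above mirror the pipeline's heuristic -- any
    # label whose name starts with "entail" maps to its index, while the
    # default LABEL_0/1/2 naming falls back to -1 (the last logit).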
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
| 424 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\\n@misc{wu2016googles,
    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
    and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
    Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
    Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
    Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
    and Jeffrey Dean},
    year={2016},
    eprint={1609.08144},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\\nThe BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

_KWARGS_DESCRIPTION = """\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
    ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
    ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
    >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
    ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
    ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
    ...         'interested', 'in', 'world', 'history']
    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
    ...          'because', 'he', 'read', 'the', 'book']

    >>> list_of_references = [[ref1a], [ref2a]]
    >>> hypotheses = [hyp1, hyp2]
    >>> google_bleu = datasets.load_metric("google_bleu")
    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
    >>> print(round(results["google_bleu"], 2))
    0.44

    Example 2:
    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
    ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
    ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
    >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
    ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
    ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
    >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
    ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
    ...          'heed', 'the', 'cat', 'commands']
    >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
    ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
    ...          'of', 'the', 'cat']

    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
    ...         'interested', 'in', 'world', 'history']
    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
    ...          'because', 'he', 'read', 'the', 'book']

    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
    >>> hypotheses = [hyp1, hyp2]
    >>> google_bleu = datasets.load_metric("google_bleu")
    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
    >>> print(round(results["google_bleu"], 2))
    0.61

    Example 3:
    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
    ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
    ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
    >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
    ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
    ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
    >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
    ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
    ...          'heed', 'the', 'cat', 'commands']
    >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
    ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
    ...          'of', 'the', 'cat']

    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
    ...         'interested', 'in', 'world', 'history']
    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
    ...          'because', 'he', 'read', 'the', 'book']

    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
    >>> hypotheses = [hyp1, hyp2]
    >>> google_bleu = datasets.load_metric("google_bleu")
    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
    >>> print(round(results["google_bleu"], 2))
    0.53

    Example 4:
    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
    ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
    ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
    >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
    ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
    ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
    >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
    ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
    ...          'heed', 'the', 'cat', 'commands']
    >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
    ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
    ...          'of', 'the', 'cat']

    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
    ...         'interested', 'in', 'world', 'history']
    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
    ...          'because', 'he', 'read', 'the', 'book']

    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
    >>> hypotheses = [hyp1, hyp2]
    >>> google_bleu = datasets.load_metric("google_bleu")
    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2, max_len=6)
    >>> print(round(results["google_bleu"], 2))
    0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
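
# Note (added): min_len/max_len select the n-gram orders handed to NLTK's
# corpus_gleu; the defaults (1, 4) match the GLEU definition of Wu et al. (2016).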
| 424 | 1 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of `nums`."""
    if not nums:
        return 0
    max_including = nums[0]  # best sum that includes the current element
    max_excluding = 0  # best sum that excludes the current element
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
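
# Illustrative values (added; they follow directly from the definition):
# maximum_non_adjacent_sum([1, 2, 3]) == 4               (pick 1 and 3)
# maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18  (pick 5, 7 and 6)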
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
def topological_sort(graph):
    """Kahn's algorithm: repeatedly pop vertices whose indegree has dropped to zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
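# For the sample graph above this prints [0, 1, 2, 3, 4, 5] (added note; any
# order consistent with the edges would be an equally valid topological sort).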
| 488 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # frozen dataclass: write through __dict__ instead of attribute assignment
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
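
# Minimal usage sketch (added; assumes a dataset whose "labels" column is a
# ClassLabel -- the names are illustrative):
# task = AudioClassification(audio_column="audio", label_column="labels")
# task = task.align_with_features(dataset.features)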
| 43 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """Output class for the Semantic Stable Diffusion pipeline."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
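
# Downstream usage sketch (added; the checkpoint name is illustrative):
# pipe = SemanticStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# result = pipe("a photo of a cat")  # returns the output dataclass above
# image = result.images[0]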
| 290 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.json'}
a_ = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
a_ = {'mgp-str': 2_7}
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =VOCAB_FILES_NAMES
UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , UpperCamelCase_ , UpperCamelCase_="[GO]" , UpperCamelCase_="[GO]" , UpperCamelCase_="[s]" , UpperCamelCase_="[GO]" , **UpperCamelCase_ ) -> Optional[Any]:
super().__init__(
unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
__lowercase : List[str] = json.load(UpperCamelCase_ )
__lowercase : int = {v: k for k, v in self.vocab.items()}
@property
def _lowerCamelCase ( self ) -> int:
return len(self.vocab )
def _lowerCamelCase ( self ) -> str:
return dict(self.vocab , **self.added_tokens_encoder )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]:
__lowercase : Any = []
for s in text:
char_tokens.extend(UpperCamelCase_ )
return char_tokens
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict:
return self.vocab.get(UpperCamelCase_ , self.vocab.get(self.unk_token ) )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]:
return self.decoder.get(UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(UpperCamelCase_ ) )
return
__lowercase : str = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' )
return (vocab_file,)
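
# Illustrative round trip (added; assumes a local vocab.json mapping characters
# to ids):
# tok = MgpstrTokenizer(vocab_file="vocab.json")
# ids = [tok._convert_token_to_id(c) for c in tok._tokenize("ab")]  # one id per character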
| 523 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Union[str, Any] = 10
def _lowerCamelCase ( self ) -> str:
__lowercase : List[str] = [1, 2, 3, 4]
__lowercase : List[Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__lowercase : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> int:
__lowercase : List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__lowercase : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : List[Any] = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
__lowercase ,__lowercase : Optional[Any] = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Optional[int] = ''''''
__lowercase ,__lowercase : Any = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
self.assertEqual(UpperCamelCase_ , [] )
def _lowerCamelCase ( self ) -> Dict:
__lowercase : List[str] = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
__lowercase ,__lowercase : int = process_story(UpperCamelCase_ )
__lowercase : Union[str, Any] = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowercase : List[str] = ['''It was the best of times.''']
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : Union[str, Any] = torch.tensor([1, 2, 3, 4] )
__lowercase : Union[str, Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 0 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Optional[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__lowercase : Any = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 23 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__lowercase : str = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 1 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ) -> Dict:
__lowercase : List[Any] = 1_01
__lowercase : Union[str, Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
__lowercase : Optional[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__lowercase : Optional[int] = compute_token_type_ids(UpperCamelCase_ , UpperCamelCase_ )
np.testing.assert_array_equal(UpperCamelCase_ , UpperCamelCase_ )
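
# Added note: build_mask marks real tokens with 1 and the padding id with 0, e.g.
# build_mask(torch.tensor([1, 2, 3, 4, 23, 23, 23]), 23) -> [1, 1, 1, 1, 0, 0, 0].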
| 523 | 1 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
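
# Added note: the masks above follow the usual HF convention (1 = attend,
# 0 = padding); the decoder mask always keeps the first (start) position visible.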
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 59 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['image_processor', 'tokenizer']
lowercase__ = 'ViTImageProcessor'
lowercase__ = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __a=None , __a=None , **__a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __a , )
_UpperCamelCase = kwargs.pop('''feature_extractor''')
_UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
super().__init__(__a , __a)
def __call__( self , __a=None , __a=None , __a=None , __a=None , **__a) -> Tuple:
'''simple docstring'''
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''')
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''')
if text is not None:
_UpperCamelCase = self.tokenizer(__a , return_tensors=__a , **__a)
if visual_prompt is not None:
_UpperCamelCase = self.image_processor(__a , return_tensors=__a , **__a)
if images is not None:
_UpperCamelCase = self.image_processor(__a , return_tensors=__a , **__a)
if visual_prompt is not None and images is not None:
_UpperCamelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_UpperCamelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**__a) , tensor_type=__a)
def UpperCAmelCase ( self , *__a , **__a) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*__a , **__a)
def UpperCAmelCase ( self , *__a , **__a) -> List[str]:
'''simple docstring'''
return self.tokenizer.decode(*__a , **__a)
@property
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __a , )
return self.image_processor_class
@property
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __a , )
return self.image_processor
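
# Usage sketch (added; the checkpoint is the public CLIPSeg one, shown for
# illustration only):
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# inputs = processor(text=["a cat"], images=image, return_tensors="pt")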
| 19 | 0 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
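
# Example invocation (added; paths and model name are illustrative):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#       --data_dir xsum --save_path xsum_packed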
| 44 |
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 44 | 1 |
def solution(n: int = 1_00) -> int:
    """Count the distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
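
# Added check: for the full Project Euler 29 range, solution(100) == 9183
# (the count of distinct a**b with 2 <= a, b <= 100).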
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 85 |
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in order of decreasing value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
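
# Classic worked instance (added): value=[60, 100, 120], weight=[10, 20, 30],
# capacity=50 -> (240.0, [1, 1, 2/3]): items 1 and 2 are taken whole, then 2/3
# of item 3 fills the remaining 20 units of capacity.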
if __name__ == "__main__":
import doctest
doctest.testmod()
| 524 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 452 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature-extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__UpperCamelCase :List[Any] = feat_extract.pad(__lowercase , padding=__lowercase)
__UpperCamelCase :Tuple = input_a[input_name]
__UpperCamelCase :int = feat_extract.pad(__lowercase , padding='''longest''')
__UpperCamelCase :int = input_a[input_name]
__UpperCamelCase :Optional[Any] = feat_extract.pad(__lowercase , padding='''max_length''' , max_length=len(speech_inputs[-1]))
__UpperCamelCase :Dict = input_a[input_name]
__UpperCamelCase :List[Any] = feat_extract.pad(__lowercase , padding='''longest''' , return_tensors='''np''')
__UpperCamelCase :Optional[int] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(__lowercase):
feat_extract.pad(__lowercase , padding='''max_length''')[input_name]
__UpperCamelCase :Any = feat_extract.pad(
__lowercase , padding='''max_length''' , max_length=__lowercase , return_tensors='''np''')
__UpperCamelCase :Tuple = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(__lowercase))
self.assertTrue(_inputs_have_equal_length(__lowercase))
self.assertTrue(_inputs_have_equal_length(__lowercase))
self.assertTrue(_inputs_are_equal(__lowercase , __lowercase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
__UpperCamelCase :int = feat_extract.pad(__lowercase , pad_to_multiple_of=10)
__UpperCamelCase :Tuple = input_a[input_name]
__UpperCamelCase :Optional[int] = feat_extract.pad(__lowercase , padding='''longest''' , pad_to_multiple_of=10)
__UpperCamelCase :Tuple = input_a[input_name]
__UpperCamelCase :str = feat_extract.pad(
__lowercase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=__lowercase)
__UpperCamelCase :Any = input_a[input_name]
__UpperCamelCase :List[str] = feat_extract.pad(
__lowercase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=__lowercase , return_tensors='''np''' , )
__UpperCamelCase :List[str] = input_a[input_name]
self.assertTrue(all(len(__lowercase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(__lowercase , __lowercase))
__UpperCamelCase :str = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(__lowercase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
__UpperCamelCase :Optional[Any] = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})
# truncate to smallest
__UpperCamelCase :List[Any] = feat_extract.pad(
__lowercase , padding='''max_length''' , max_length=len(speech_inputs[0]) , truncation=__lowercase)
__UpperCamelCase :str = input_a[input_name]
__UpperCamelCase :Optional[int] = feat_extract.pad(__lowercase , padding='''max_length''' , max_length=len(speech_inputs[0]))
__UpperCamelCase :List[str] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__lowercase))
self.assertFalse(_inputs_have_equal_length(__lowercase))
# truncate to smallest with np
__UpperCamelCase :Union[str, Any] = feat_extract.pad(
__lowercase , padding='''max_length''' , max_length=len(speech_inputs[0]) , return_tensors='''np''' , truncation=__lowercase , )
__UpperCamelCase :List[Any] = input_a[input_name]
__UpperCamelCase :int = feat_extract.pad(
__lowercase , padding='''max_length''' , max_length=len(speech_inputs[0]) , return_tensors='''np''')
__UpperCamelCase :Optional[Any] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__lowercase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__lowercase))
# truncate to middle
__UpperCamelCase :Optional[Any] = feat_extract.pad(
__lowercase , padding='''max_length''' , max_length=len(speech_inputs[1]) , truncation=__lowercase , return_tensors='''np''' , )
__UpperCamelCase :Dict = input_a[input_name]
__UpperCamelCase :str = feat_extract.pad(
__lowercase , padding='''max_length''' , max_length=len(speech_inputs[1]) , truncation=__lowercase)
__UpperCamelCase :Union[str, Any] = input_a[input_name]
__UpperCamelCase :Optional[Any] = feat_extract.pad(
__lowercase , padding='''max_length''' , max_length=len(speech_inputs[1]) , return_tensors='''np''')
__UpperCamelCase :Union[str, Any] = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(__lowercase))
self.assertTrue(_inputs_have_equal_length(__lowercase))
self.assertTrue(_inputs_are_equal(__lowercase , __lowercase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__lowercase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__lowercase):
feat_extract.pad(__lowercase , truncation=__lowercase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__lowercase):
feat_extract.pad(__lowercase , padding='''longest''' , truncation=__lowercase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__lowercase):
feat_extract.pad(__lowercase , padding='''longest''' , truncation=__lowercase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(__lowercase):
feat_extract.pad(__lowercase , padding='''max_length''' , truncation=__lowercase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__UpperCamelCase :Dict = 12
__UpperCamelCase :str = feat_extract.pad(
__lowercase , padding='''max_length''' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=__lowercase , truncation=__lowercase , )
__UpperCamelCase :List[Any] = input_a[input_name]
__UpperCamelCase :Tuple = feat_extract.pad(
__lowercase , padding='''max_length''' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=__lowercase , )
__UpperCamelCase :Any = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__UpperCamelCase :Optional[Any] = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
__UpperCamelCase :Dict = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(__lowercase))
self.assertFalse(_inputs_have_equal_length(__lowercase))
def UpperCamelCase__ ( self) -> Any:
self._check_padding(numpify=__lowercase)
def UpperCamelCase__ ( self) -> Dict:
self._check_padding(numpify=__lowercase)
def UpperCamelCase__ ( self) -> Any:
self._check_truncation(numpify=__lowercase)
def UpperCamelCase__ ( self) -> Union[str, Any]:
self._check_truncation(numpify=__lowercase)
@require_torch
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :int = self.feature_extraction_class(**self.feat_extract_dict)
__UpperCamelCase :Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common()
__UpperCamelCase :str = feat_extract.model_input_names[0]
__UpperCamelCase :int = BatchFeature({input_name: speech_inputs})
__UpperCamelCase :List[str] = feat_extract.pad(__lowercase , padding='''longest''' , return_tensors='''np''')[input_name]
__UpperCamelCase :List[Any] = feat_extract.pad(__lowercase , padding='''longest''' , return_tensors='''pt''')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2)
@require_tf
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :int = self.feature_extraction_class(**self.feat_extract_dict)
__UpperCamelCase :Any = self.feat_extract_tester.prepare_inputs_for_common()
__UpperCamelCase :Any = feat_extract.model_input_names[0]
__UpperCamelCase :Union[str, Any] = BatchFeature({input_name: speech_inputs})
__UpperCamelCase :str = feat_extract.pad(__lowercase , padding='''longest''' , return_tensors='''np''')[input_name]
__UpperCamelCase :Optional[Any] = feat_extract.pad(__lowercase , padding='''longest''' , return_tensors='''tf''')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2)
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :List[Any] = self.feat_extract_dict
__UpperCamelCase :Dict = True
__UpperCamelCase :Dict = self.feature_extraction_class(**__lowercase)
__UpperCamelCase :List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__UpperCamelCase :Any = [len(__lowercase) for x in speech_inputs]
__UpperCamelCase :int = feat_extract.model_input_names[0]
__UpperCamelCase :Optional[int] = BatchFeature({input_name: speech_inputs})
__UpperCamelCase :Union[str, Any] = feat_extract.pad(__lowercase , padding='''longest''' , return_tensors='''np''')
self.assertIn('''attention_mask''' , __lowercase)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , __lowercase)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Optional[int] = self.feat_extract_dict
__UpperCamelCase :Optional[int] = True
__UpperCamelCase :Dict = self.feature_extraction_class(**__lowercase)
__UpperCamelCase :List[str] = self.feat_extract_tester.prepare_inputs_for_common()
__UpperCamelCase :List[Any] = [len(__lowercase) for x in speech_inputs]
__UpperCamelCase :List[Any] = feat_extract.model_input_names[0]
__UpperCamelCase :int = BatchFeature({input_name: speech_inputs})
__UpperCamelCase :Dict = min(__lowercase)
__UpperCamelCase :Union[str, Any] = feat_extract.pad(
__lowercase , padding='''max_length''' , max_length=__lowercase , truncation=__lowercase , return_tensors='''np''')
self.assertIn('''attention_mask''' , __lowercase)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
| 452 | 1 |
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    """Return True if ``num`` reads the same forwards and backwards."""
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    """Return ``num`` plus its digit-reversed counterpart."""
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 10_000) -> int:
    """Count Lychrel candidates below ``limit``: numbers that do not reach a
    palindrome within 50 reverse-and-add iterations (Project Euler 55)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
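# Worked example: 349 reaches a palindrome in three reverse-and-add steps:
# 349 + 943 = 1292, 1292 + 2921 = 4213, 4213 + 3124 = 7337. A candidate such as
# 196 produces no palindrome within the 50-iteration cap, so it is counted above.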
if __name__ == "__main__":
print(F"""{solution() = }""")
| 210 |
'''simple docstring'''
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}  # maps "10".."35" -> "A".."Z"
def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive decimal integer to its representation in any base 2-36."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]
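# Worked example: decimal_to_any(255, 16) takes remainder 15 ("F") twice and
# returns "FF"; decimal_to_any(5, 2) collects "1", "0", "1" and returns "101".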
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 210 | 1 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Iterate z -> z^2 + c for c = x + y*i and return the normalized step at
    which the orbit escapes (1.0 if it never escapes within max_step)."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 2 (checked here as |z|^2 > 4)
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
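# Illustrative check: for c = 0 + 0i the orbit never leaves the origin, the loop
# runs to completion with step == max_step - 1, and get_distance(0, 0, 50)
# returns 1.0, i.e. the point is treated as inside the set.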
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the set (distance == 1), white otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Black for points inside the set, otherwise a hue based on escape time."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set into a PIL image."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
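# Note: the loop maps pixels linearly onto the complex plane -- image_x at
# image_width / 2 lands on figure_center_x, and the view spans figure_width
# horizontally. Rendering cost is O(image_width * image_height * max_step).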
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 73 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = StableDiffusionDiffEditPipeline
__UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
__UpperCAmelCase : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCAmelCase : List[str] = frozenset([] )
def _UpperCamelCase ( self ):
torch.manual_seed(0 )
lowerCamelCase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
lowerCamelCase_ : str = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
lowerCamelCase_ : Dict = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , )
torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ )
lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : str = floats_tensor((1, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : List[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : List[Any] = torch.manual_seed(a_ )
else:
lowerCamelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : Tuple = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ : Any = Image.fromarray(np.uinta(a_ ) ).convert("RGB" )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : Tuple = torch.manual_seed(a_ )
else:
lowerCamelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : int = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ : Optional[int] = Image.fromarray(np.uinta(a_ ) ).convert("RGB" )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : Optional[int] = torch.manual_seed(a_ )
else:
lowerCamelCase_ : Tuple = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : Union[str, Any] = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self ):
if not hasattr(self.pipeline_class , "_optional_components" ):
return
lowerCamelCase_ : List[Any] = self.get_dummy_components()
lowerCamelCase_ : int = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a_ , a_ , a_ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowerCamelCase_ : int = self.get_dummy_inputs(a_ )
lowerCamelCase_ : int = pipe(**a_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a_ )
lowerCamelCase_ : Optional[int] = self.pipeline_class.from_pretrained(a_ )
pipe_loaded.to(a_ )
pipe_loaded.set_progress_bar_config(disable=a_ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a_ , a_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
lowerCamelCase_ : List[str] = self.get_dummy_inputs(a_ )
lowerCamelCase_ : Optional[int] = pipe_loaded(**a_ )[0]
lowerCamelCase_ : Optional[int] = np.abs(output - output_loaded ).max()
self.assertLess(a_ , 1E-4 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = "cpu"
lowerCamelCase_ : int = self.get_dummy_components()
lowerCamelCase_ : List[Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Any = self.get_dummy_mask_inputs(a_ )
lowerCamelCase_ : int = pipe.generate_mask(**a_ )
lowerCamelCase_ : List[Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowerCamelCase_ : List[str] = np.array([0] * 9 )
lowerCamelCase_ : Optional[int] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = "cpu"
lowerCamelCase_ : Union[str, Any] = self.get_dummy_components()
lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Dict = self.get_dummy_inversion_inputs(a_ )
lowerCamelCase_ : Dict = pipe.invert(**a_ ).images
lowerCamelCase_ : str = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase_ : Dict = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
def _UpperCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = "cpu"
lowerCamelCase_ : int = self.get_dummy_components()
lowerCamelCase_ : int = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"}
lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler(**a_ )
lowerCamelCase_ : List[str] = DPMSolverMultistepInverseScheduler(**a_ )
lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : int = self.get_dummy_inversion_inputs(a_ )
lowerCamelCase_ : str = pipe.invert(**a_ ).images
lowerCamelCase_ : int = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase_ : Union[str, Any] = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _UpperCamelCase ( cls ):
lowerCamelCase_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
lowerCamelCase_ : int = raw_image.convert("RGB" ).resize((768, 768) )
lowerCamelCase_ : List[Any] = raw_image
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = torch.manual_seed(0 )
lowerCamelCase_ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : str = "a bowl of fruit"
lowerCamelCase_ : Optional[int] = "a bowl of pears"
lowerCamelCase_ : List[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
lowerCamelCase_ : str = pipe.invert(
prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ ).latents
lowerCamelCase_ : List[str] = pipe(
prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
lowerCamelCase_ : List[str] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
lowerCamelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Any = "a bowl of fruit"
lowerCamelCase_ : Dict = "a bowl of pears"
lowerCamelCase_ : Optional[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
lowerCamelCase_ : str = pipe.invert(
prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ , num_inference_steps=25 , ).latents
lowerCamelCase_ : Any = pipe(
prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
lowerCamelCase_ : List[str] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 73 | 1 |
"""simple docstring"""
def interpolation_search(sorted_collection: list, item: int):
    """Search ``item`` in an ascending ``sorted_collection`` by probing the
    position interpolated from the boundary values; returns the index or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
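# Worked probe: searching 67 in [10, 30, 40, 45, 50, 66, 77, 93] first computes
# point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 4; the value there (50) is
# below 67, so the search continues to the right of index 4.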
def interpolation_search_by_recursion(sorted_collection: list, item: int, left: int, right: int):
    """Recursive variant: the caller supplies the current search bounds."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
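# Note: interpolation search averages O(log log n) probes when the keys are
# roughly uniformly distributed, but degrades to O(n) on skewed data -- unlike
# binary search, whose O(log n) bound holds regardless of the distribution.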
def __assert_sorted(collection: list) -> bool:
    """Raise ValueError if ``collection`` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
    debug = 1  # kept from the original; set to 1 so the demo collection below is defined
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("""Not found""")
| 535 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
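    # Note: this is the standard lazy-import pattern -- at import time only the
    # _import_structure mapping of plain strings is built, and _LazyModule
    # resolves each submodule on first attribute access, deferring the heavy
    # torch / tensorflow / flax imports until they are actually needed.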
| 535 | 1 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a__ ( __SCREAMING_SNAKE_CASE ):
_A = (EulerDiscreteScheduler,)
_A = 10
def lowerCAmelCase ( self : Optional[Any] , **A_ : str ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_: Tuple = {
"""num_train_timesteps""": 11_00,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**A_ )
return config
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=A_ )
def lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A_ )
def lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_: Any = self.scheduler_classes[0]
lowerCamelCase_: Optional[int] = self.get_scheduler_config()
lowerCamelCase_: Tuple = scheduler_class(**A_ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_: Optional[Any] = torch.manual_seed(0 )
lowerCamelCase_: Optional[Any] = self.dummy_model()
lowerCamelCase_: List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_: Optional[int] = sample.to(A_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_: Dict = scheduler.scale_model_input(A_ , A_ )
lowerCamelCase_: Tuple = model(A_ , A_ )
lowerCamelCase_: int = scheduler.step(A_ , A_ , A_ , generator=A_ )
lowerCamelCase_: Union[str, Any] = output.prev_sample
lowerCamelCase_: int = torch.sum(torch.abs(A_ ) )
lowerCamelCase_: int = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_: Optional[int] = self.scheduler_classes[0]
lowerCamelCase_: Dict = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase_: Any = scheduler_class(**A_ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_: Any = torch.manual_seed(0 )
lowerCamelCase_: Dict = self.dummy_model()
lowerCamelCase_: Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_: Any = sample.to(A_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_: int = scheduler.scale_model_input(A_ , A_ )
lowerCamelCase_: Optional[Any] = model(A_ , A_ )
lowerCamelCase_: List[str] = scheduler.step(A_ , A_ , A_ , generator=A_ )
lowerCamelCase_: str = output.prev_sample
lowerCamelCase_: int = torch.sum(torch.abs(A_ ) )
lowerCamelCase_: List[Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
lowerCamelCase_: Any = self.scheduler_classes[0]
lowerCamelCase_: Optional[Any] = self.get_scheduler_config()
lowerCamelCase_: int = scheduler_class(**A_ )
scheduler.set_timesteps(self.num_inference_steps , device=A_ )
lowerCamelCase_: Dict = torch.manual_seed(0 )
lowerCamelCase_: Union[str, Any] = self.dummy_model()
lowerCamelCase_: str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCamelCase_: str = sample.to(A_ )
for t in scheduler.timesteps:
lowerCamelCase_: str = scheduler.scale_model_input(A_ , A_ )
lowerCamelCase_: str = model(A_ , A_ )
lowerCamelCase_: List[Any] = scheduler.step(A_ , A_ , A_ , generator=A_ )
lowerCamelCase_: int = output.prev_sample
lowerCamelCase_: List[Any] = torch.sum(torch.abs(A_ ) )
lowerCamelCase_: Optional[int] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
lowerCamelCase_: Any = self.scheduler_classes[0]
lowerCamelCase_: Dict = self.get_scheduler_config()
lowerCamelCase_: int = scheduler_class(**A_ , use_karras_sigmas=A_ )
scheduler.set_timesteps(self.num_inference_steps , device=A_ )
lowerCamelCase_: List[str] = torch.manual_seed(0 )
lowerCamelCase_: Union[str, Any] = self.dummy_model()
lowerCamelCase_: Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCamelCase_: List[str] = sample.to(A_ )
for t in scheduler.timesteps:
lowerCamelCase_: int = scheduler.scale_model_input(A_ , A_ )
lowerCamelCase_: int = model(A_ , A_ )
lowerCamelCase_: List[Any] = scheduler.step(A_ , A_ , A_ , generator=A_ )
lowerCamelCase_: List[Any] = output.prev_sample
lowerCamelCase_: Optional[int] = torch.sum(torch.abs(A_ ) )
lowerCamelCase_: int = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
| 584 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Count tile totals t <= t_limit that form between 1 and n_limit distinct
    hollow square laminae (Project Euler 174)."""
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # hole and outer square must share parity for a uniform-thickness border
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
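# Worked example: outer_width = 7 with hole_width = 3 (same parity, so the
# border is uniformly 2 tiles thick) uses 7 * 7 - 3 * 3 = 40 tiles, so
# count[40] is incremented; the final sum counts tile totals t that can be
# formed by between 1 and n_limit distinct laminae.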
if __name__ == "__main__":
print(F"{solution() = }")
| 584 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = StableUnCLIPImgaImgPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase = frozenset([] )
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
UpperCAmelCase_ = 32
UpperCAmelCase_ = embedder_hidden_size
# image encoding components
UpperCAmelCase_ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=_UpperCAmelCase , projection_dim=_UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
UpperCAmelCase_ = StableUnCLIPImageNormalizer(embedding_dim=_UpperCAmelCase )
UpperCAmelCase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_UpperCAmelCase , layers_per_block=1 , upcast_attention=_UpperCAmelCase , use_linear_projection=_UpperCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL()
UpperCAmelCase_ = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any]=0 , _UpperCAmelCase : int=True ) -> str:
'''simple docstring'''
if str(_UpperCAmelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(_UpperCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if pil_image:
UpperCAmelCase_ = input_image * 0.5 + 0.5
UpperCAmelCase_ = input_image.clamp(0 , 1 )
UpperCAmelCase_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCAmelCase_ = DiffusionPipeline.numpy_to_pil(_UpperCAmelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = StableUnCLIPImgaImgPipeline(**_UpperCAmelCase )
UpperCAmelCase_ = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs(_UpperCAmelCase )
inputs.update({"image_embeds": None} )
UpperCAmelCase_ = sd_pipe(**_UpperCAmelCase ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=_UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=_UpperCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_UpperCAmelCase )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
UpperCAmelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(_UpperCAmelCase , "anime turle" , generator=_UpperCAmelCase , output_type="np" )
UpperCAmelCase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
UpperCAmelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(_UpperCAmelCase , "anime turle" , generator=_UpperCAmelCase , output_type="np" )
UpperCAmelCase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
UpperCAmelCase_ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = pipe(
_UpperCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 82 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
__SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
__SCREAMING_SNAKE_CASE : List[Any] = {
'''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and emoji file into a dictionary."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class __lowerCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase : str =VOCAB_FILES_NAMES
_UpperCAmelCase : str =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[str] =["input_ids", "attention_mask"]
def __init__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any]="<|endoftext|>" , lowerCAmelCase : Optional[int]="<|endoftext|>" , lowerCAmelCase : Tuple="<|startoftext|>" , lowerCAmelCase : Union[str, Any]="<|endoftext|>" , lowerCAmelCase : List[Any]=False , **lowerCAmelCase : Optional[Any] , ):
super().__init__(
unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , do_clean_text=lowerCAmelCase , **lowerCAmelCase , )
if not os.path.isfile(lowerCAmelCase ):
raise ValueError(
F"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(lowerCAmelCase ):
raise ValueError(
F"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
A_ = do_clean_text
A_ , A_ , A_ , A_ = load_vocab_and_emoji(lowerCAmelCase , lowerCAmelCase )
A_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def _UpperCAmelCase ( self : str ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def _UpperCAmelCase ( self : Optional[int] ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Tuple ):
return self.subword_tokenizer.tokenize(lowerCAmelCase , clean=self.do_clean_text )
def _UpperCAmelCase ( self : str , lowerCAmelCase : int ):
return self.vocab.get(lowerCAmelCase , self.vocab.get(self.unk_token ) )
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase : List[str] ):
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase )
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Optional[Any] ):
A_ = "".join(lowerCAmelCase ).strip()
return out_string
def _UpperCAmelCase ( self : Any , lowerCAmelCase : "Conversation" ):
A_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) + [self.eos_token_id] )
if len(lowerCAmelCase ) > self.model_max_length:
A_ = input_ids[-self.model_max_length :]
return input_ids
def _UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ):
A_ = 0
if os.path.isdir(lowerCAmelCase ):
A_ = os.path.join(
lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A_ = os.path.join(
lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
A_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
A_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!" )
A_ = token_index
writer.write(",".join(lowerCAmelCase ) + "\n" )
index += 1
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , lowerCAmelCase )
return vocab_file, emoji_file
class __lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
A_ = vocab # same as swe
A_ = ids_to_tokens # same as bpe
A_ = emoji
A_ = np.max([len(lowerCAmelCase ) for w in self.vocab.keys()] )
A_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
A_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
A_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
A_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
A_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
A_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
A_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
A_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
A_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self : Union[str, Any] ):
return len(self.ids_to_tokens )
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase : int ):
A_ = self.content_repattera.sub("<URL>" , lowerCAmelCase )
A_ = self.content_repattera.sub("<EMAIL>" , lowerCAmelCase )
A_ = self.content_repattera.sub("<TEL>" , lowerCAmelCase )
A_ = self.content_repattera.sub("<DATE>" , lowerCAmelCase )
A_ = self.content_repattera.sub("<DATE>" , lowerCAmelCase )
A_ = self.content_repattera.sub("<PRICE>" , lowerCAmelCase )
A_ = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
A_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def _UpperCAmelCase ( self : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple=False ):
A_ = text.replace(" " , "<SP>" )
A_ = text.replace(" " , "<SP>" )
A_ = text.replace("\r\n" , "<BR>" )
A_ = text.replace("\n" , "<BR>" )
A_ = text.replace("\r" , "<BR>" )
A_ = text.replace("\t" , "<TAB>" )
A_ = text.replace("—" , "ー" )
A_ = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
A_ = text.replace(lowerCAmelCase , lowerCAmelCase )
if clean:
A_ = self.clean_text(lowerCAmelCase )
def check_simbol(lowerCAmelCase : Tuple ):
A_ = x.encode()
if len(lowerCAmelCase ) == 1 and len(lowerCAmelCase ) == 2:
A_ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xC2A1 and c <= 0xC2BF)
or (c >= 0xC780 and c <= 0xC783)
or (c >= 0xCAB9 and c <= 0xCBBF)
or (c >= 0xCC80 and c <= 0xCDA2)
):
return True
return False
def checkuae(lowerCAmelCase : Tuple ):
A_ = x.encode()
if len(lowerCAmelCase ) == 1 and len(lowerCAmelCase ) == 3:
A_ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xE2_8080 and c <= 0xE2_B07F:
return True
return False
A_ = 0
A_ = []
while pos < len(lowerCAmelCase ):
A_ = min(len(lowerCAmelCase ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
A_ = [] # (token_id, token, pos)
for e in range(lowerCAmelCase , lowerCAmelCase , -1 ):
A_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowerCAmelCase ) > 2:
A_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(lowerCAmelCase ) > 0:
# the smallest token_id is adopted
A_ , A_ , A_ = sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[0] )[0]
result.append(lowerCAmelCase )
A_ = e
else:
A_ = pos + 1
A_ = text[pos:end]
if check_simbol(lowerCAmelCase ):
result.append("<KIGOU>" )
elif checkuae(lowerCAmelCase ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
A_ = end
return result
def _UpperCAmelCase ( self : Dict , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any]="\n" ):
A_ = []
A_ = []
A_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(lowerCAmelCase ) > 0:
words.append(bytearray(lowerCAmelCase ).decode("utf-8" , errors="replace" ) )
A_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(lowerCAmelCase )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
words.append(bytearray(lowerCAmelCase ).decode("utf-8" , errors="replace" ) )
A_ = "".join(lowerCAmelCase )
return text
| 452 | 0 |
class lowerCamelCase :
def __init__( self , lowercase__ = "" , lowercase__ = False):
__UpperCAmelCase : Tuple = {}
# A node will be a leaf if the tree contains its word
__UpperCAmelCase : str = is_leaf
__UpperCAmelCase : Optional[int] = prefix
def A( self , lowercase__):
__UpperCAmelCase : Optional[int] = 0
for q, w in zip(self.prefix , lowercase__):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self , words):
        for word in words:
            self.insert(word)
    def insert( self , word):
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find( self , word):
        incoming_node = self.nodes.get(word[0] , None)
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete( self , word):
        incoming_node = self.nodes.get(word[0] , None)
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
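    # Note the re-compression above: after removing a leaf, a node left with a
    # single child and no leaf marker is merged with that child, preserving the
    # radix-tree invariant that every internal node branches.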
    def print_tree( self , height = 0):
        if self.prefix != "":
            print('''-''' * height , self.prefix , ''' (leaf)''' if self.is_leaf else '''''')
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie( ) -> bool:
    '''simple docstring'''
    words = '''banana bananas bandana band apple all beast'''.split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )
    return True
def pytests( ) -> None:
    '''simple docstring'''
    assert test_trie()
def main( ) -> None:
    '''simple docstring'''
    root = RadixNode()
    words = '''banana bananas bandanas bandana band apple all beast'''.split()
    root.insert_many(words )
    print('''Words:''' , words )
    print('''Tree:''' )
    root.print_tree()
if __name__ == "__main__":
main()
| 717 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig( PretrainedConfig ):
    model_type = '''realm'''
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , retriever_proj_size=1_2_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , num_candidates=8 , intermediate_size=3_0_7_2 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , span_hidden_size=2_5_6 , max_span_width=1_0 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=3_2_0 , num_block_records=1_3_3_5_3_7_1_8 , searcher_beam_size=5_0_0_0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
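# Usage sketch (hedged): with this module importable, the config is built with
# keyword overrides like any other HuggingFace config, e.g.
#     config = RealmConfig(num_candidates=4, reader_beam_size=3)
#     assert config.num_candidates == 4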
| 675 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
__UpperCAmelCase = "2020.9.26"
__UpperCAmelCase = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_ad(x: float , y: float , z: float , scale: float , distance: float ) -> tuple[float, float]:
    '''simple docstring'''
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f"""Input values must either be float or int: {list(locals().values() )}"""
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
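# Worked example (plain arithmetic, no extra assumptions): with x=1.0, y=2.0,
# z=3.0, scale=10.0, distance=10.0 the projection yields
# ((1*10)/(3+10))*10 ~= 7.6923 and ((2*10)/(3+10))*10 ~= 15.3846.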
def rotate(x: float , y: float , z: float , axis: str , angle: float ) -> tuple[float, float, float]:
    '''simple docstring'''
    if not isinstance(axis , str ):
        raise TypeError('Axis must be a str' )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            'Input values except axis must either be float or int: '
            f"""{list(input_variables.values() )}"""
        )
        raise TypeError(msg )
    angle = (angle % 3_60) / 4_50 * 1_80 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
    return new_x, new_y, new_z
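# Note on the angle convention: the input is first folded into [0, 360) and
# then scaled by this module's own factor, (angle % 360) / 450 * 180 / math.pi,
# before reaching sin/cos, so callers pass degrees rather than radians.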
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }')
print(F'{rotate(1.0, 2.0, 3.0, "y", 90.0) = }')
| 329 |
'''simple docstring'''
import requests
__snake_case = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def A_ ( SCREAMING_SNAKE_CASE_ ) ->None:
# fetching a list of articles in json format
lowercase_ = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["""articles"""] , 1 ):
print(f"""{i}.) {article["title"]}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 451 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self , image_processor , tokenizer ) -> Dict:
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> List[Any]:
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ) -> Any:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> Optional[int]:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ) -> Optional[int]:
        return ["input_ids", "attention_mask", "pixel_values"]
| 54 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class lowercase ( unittest.TestCase ):
    def setUp( self ) -> Union[str, Any]:
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
    def tearDown( self ) -> Optional[int]:
        check_copies.TRANSFORMER_PATH = """src/transformers"""
        shutil.rmtree(self.transformer_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ) -> Optional[Any]:
        code = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.transformer_dir , """new_code.py""" )
        with open(fname , """w""" , newline="""\n""" ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , """r""" ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_transformers( self ) -> str:
        code = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
        self.assertEqual(code , REFERENCE_CODE )
    def test_is_copy_consistent( self ) -> int:
        # Base copy consistency
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
        # With no empty line at the end
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
        self.check_copy_consistency(
            F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , REFERENCE_CODE , overwrite_result=re.sub("""Bert""" , """TestModel""" , REFERENCE_CODE ) , )
    def test_convert_to_localized_md( self ) -> Tuple:
        localized_readme = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list , localized_md_list , localized_readme["""format_model_list"""] )
        self.assertFalse(num_models_equal )
        self.assertEqual(converted_md_list , converted_localized_md_list )
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list , converted_localized_md_list , localized_readme["""format_model_list"""] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal )
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list , link_unchanged_md_list , localized_readme["""format_model_list"""] )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list , converted_md_list_sample )
| 54 | 1 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path )-> Any:
    '''simple docstring'''
    filename = tmp_path / '''file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def malformed_csv_file(tmp_path )-> int:
    '''simple docstring'''
    filename = tmp_path / '''malformed_file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20,
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_image(tmp_path , image_file )-> List[str]:
    '''simple docstring'''
    filename = tmp_path / '''csv_with_image.csv'''
    data = textwrap.dedent(
        f'''\
        image
        {image_file}
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_label(tmp_path )-> Tuple:
    '''simple docstring'''
    filename = tmp_path / '''csv_with_label.csv'''
    data = textwrap.dedent(
        '''\
        label
        good
        bad
        good
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_int_list(tmp_path )-> Union[str, Any]:
    '''simple docstring'''
    filename = tmp_path / '''csv_with_int_list.csv'''
    data = textwrap.dedent(
        '''\
        int_list
        1 2 3
        4 5 6
        7 8 9
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file , malformed_csv_file , caplog )-> Union[str, Any]:
    '''simple docstring'''
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match='''Error tokenizing data''' ):
        for _ in generator:
            pass
    assert any(
        record.levelname == '''ERROR'''
        and '''Failed to read file''' in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image(csv_file_with_image )-> Optional[Any]:
    '''simple docstring'''
    with open(csv_file_with_image , encoding='''utf-8''' ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('''image''' ).type == Image()()
    generated_content = pa_table.to_pydict()['''image''']
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label )-> int:
    '''simple docstring'''
    with open(csv_file_with_label , encoding='''utf-8''' ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
    generated_content = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list )-> Any:
    '''simple docstring'''
    csv = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x : [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
    generated_content = pa_table.to_pydict()['''int_list''']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 24 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''ibert'''
    def __init__(self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ) -> Any:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs(self : Dict ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 15 | 0 |
g = 9.8_0665
def _a ( fluid_density , volume , gravity = g ) -> float:
if fluid_density <= 0:
raise ValueError('Impossible fluid density' )
if volume < 0:
raise ValueError('Impossible Object volume' )
if gravity <= 0:
raise ValueError('Impossible Gravity' )
return fluid_density * gravity * volume
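# Worked example (plain arithmetic): fresh water has a fluid_density of
# roughly 998.2 kg/m^3, so a fully submerged volume of 0.5 m^3 under the
# default gravity experiences about 998.2 * 9.80665 * 0.5 ~= 4894.5 N of lift.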
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 144 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase = logging.get_logger(__name__)
def squared_euclidean_distance( a , b ) -> List[Any]:
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize( x , clusters ) -> Tuple:
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
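# Together these helpers perform nearest-palette colour quantization:
# squared_euclidean_distance builds the pixel-to-cluster distance matrix and
# color_quantize returns, per RGB pixel, the index of its closest cluster.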
class lowerCamelCase__ ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__(self : Tuple , clusters : Optional[Union[List[List[int]], np.ndarray]] = None , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_normalize : bool = True , do_color_quantize : bool = True , **kwargs : Union[str, Any] , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(self : Union[str, Any] , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : List[Any] , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
        return resize(
            image , size=(size['height'], size['width']) , resample=resample , data_format=data_format , **kwargs )
    def normalize(self : Optional[Any] , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray:
        """simple docstring"""
        image = rescale(image=image , scale=1 / 127.5 , data_format=data_format )
        image = image - 1
        return image
    def preprocess(self : Dict , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_normalize : bool = None , do_color_quantize : Optional[bool] = None , clusters : Optional[Union[List[List[int]], np.ndarray]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **kwargs : Union[str, Any] , ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_color_quantize and clusters is None:
            raise ValueError('Clusters must be specified if do_color_quantize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images , clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'input_ids': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 144 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name ) -> List[str]:
    '''simple docstring'''
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 1_28
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('Model not supported')
    repo_id = 'huggingface/label-files'
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = 'speech-commands-v2-id2label.json'
    else:
        config.num_labels = 5_27
        filename = 'audioset-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset') , 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
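# Hedged note: the strides set above control how densely AST's 16x16
# spectrogram patches overlap; the default 10-10 checkpoints use stride 10 on
# both axes, while 12/14/16 reduce the overlap and shorten the patch sequence.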
def rename_key(name ) -> Any:
    '''simple docstring'''
    if "module.v" in name:
        name = name.replace('module.v' , 'audio_spectrogram_transformer')
    if "cls_token" in name:
        name = name.replace('cls_token' , 'embeddings.cls_token')
    if "dist_token" in name:
        name = name.replace('dist_token' , 'embeddings.distillation_token')
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection')
    # transformer blocks
    if "blocks" in name:
        name = name.replace('blocks' , 'encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn' , 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense')
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm')
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('module.mlp_head.0' , 'classifier.layernorm')
    if "module.mlp_head.1" in name:
        name = name.replace('module.mlp_head.1' , 'classifier.dense')
    return name
def convert_state_dict(orig_state_dict , config ) -> Dict:
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def remove_keys(state_dict ) -> Optional[Any]:
    '''simple docstring'''
    ignore_keys = [
        'module.v.head.weight',
        'module.v.head.bias',
        'module.v.head_dist.weight',
        'module.v.head_dist.bias',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub=False ) -> List[str]:
    '''simple docstring'''
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu')
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config)
    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2_6_7_7_3_9_3 if 'speech-commands' not in model_name else -6.8_4_5_9_7_8
    std = 4.5_6_8_9_9_7_4 if 'speech-commands' not in model_name else 5.5_6_5_4_5_2_6
    max_length = 10_24 if 'speech-commands' not in model_name else 1_28
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length)
    if "speech-commands" in model_name:
        dataset = load_dataset('speech_commands' , 'v0.02' , split='validation')
        waveform = dataset[0]['audio']['array']
    else:
        filepath = hf_hub_download(
            repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
        waveform , _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=1_60_00 , return_tensors='pt')
    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4])
    else:
        raise ValueError('Unknown model name')
    if not torch.allclose(logits[0, :3] , expected_slice , atol=1E-4):
        raise ValueError('Logits don\'t match')
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(F'''Saving feature extractor to {pytorch_dump_folder_path}''')
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model and feature extractor to the hub...')
        model.push_to_hub(F'''MIT/{model_name}''')
        feature_extractor.push_to_hub(F'''MIT/{model_name}''')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 125 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = '''hf-internal-testing/tiny-random-bert'''
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
FULL_COMMIT = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def test_cached_file( self ) -> Optional[Any]:
        archive_file = cached_file(RANDOM_BERT ,CONFIG_NAME )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR ,subfolder ) ) )
        with open(os.path.join(CACHE_DIR ,'refs' ,'main' ) ) as f:
            main_commit = f.read()
        self.assertEqual(archive_file ,os.path.join(CACHE_DIR ,'snapshots' ,main_commit ,CONFIG_NAME ) )
        self.assertTrue(os.path.isfile(archive_file ) )
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT ,CONFIG_NAME )
        self.assertEqual(archive_file ,new_archive_file )
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT ,CONFIG_NAME ,revision='9b8c223' )
        self.assertEqual(archive_file ,os.path.join(CACHE_DIR ,'snapshots' ,FULL_COMMIT ,CONFIG_NAME ) )
def _lowerCamelCase ( self : Any ) -> Optional[int]:
with self.assertRaisesRegex(UpperCamelCase ,'is not a valid model identifier' ):
_lowercase : List[str] = cached_file('tiny-random-bert' ,UpperCamelCase )
with self.assertRaisesRegex(UpperCamelCase ,'is not a valid git identifier' ):
_lowercase : Tuple = cached_file(UpperCamelCase ,UpperCamelCase ,revision='aaaa' )
with self.assertRaisesRegex(UpperCamelCase ,'does not appear to have a file named' ):
_lowercase : Tuple = cached_file(UpperCamelCase ,'conf' )
def _lowerCamelCase ( self : Optional[Any] ) -> List[str]:
with self.assertRaisesRegex(UpperCamelCase ,'does not appear to have a file named' ):
_lowercase : Tuple = cached_file(UpperCamelCase ,'conf' )
with open(os.path.join(UpperCamelCase ,'refs' ,'main' ) ) as f:
_lowercase : Union[str, Any] = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase ,'.no_exist' ,UpperCamelCase ,'conf' ) ) )
_lowercase : Dict = cached_file(UpperCamelCase ,'conf' ,_raise_exceptions_for_missing_entries=UpperCamelCase )
self.assertIsNone(UpperCamelCase )
_lowercase : Optional[Any] = cached_file(UpperCamelCase ,'conf' ,local_files_only=UpperCamelCase ,_raise_exceptions_for_missing_entries=UpperCamelCase )
self.assertIsNone(UpperCamelCase )
_lowercase : List[Any] = mock.Mock()
_lowercase : Dict = 500
_lowercase : List[Any] = {}
_lowercase : List[Any] = HTTPError
_lowercase : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=UpperCamelCase ) as mock_head:
_lowercase : List[str] = cached_file(UpperCamelCase ,'conf' ,_raise_exceptions_for_connection_errors=UpperCamelCase )
self.assertIsNone(UpperCamelCase )
# This check we did call the fake head request
mock_head.assert_called()
    def test_has_file( self ) -> Optional[int]:
        self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' ,WEIGHTS_NAME ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' ,TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' ,FLAX_WEIGHTS_NAME ) )
def _lowerCamelCase ( self : Any ) -> Any:
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('bert-base-cased' ,'ahah.txt' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCamelCase ,'is not a valid model identifier' ):
get_file_from_repo('bert-base-case' ,UpperCamelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCamelCase ,'is not a valid git identifier' ):
get_file_from_repo('bert-base-cased' ,UpperCamelCase ,revision='ahaha' )
_lowercase : int = get_file_from_repo('bert-base-cased' ,UpperCamelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
_lowercase : List[str] = json.loads(open(UpperCamelCase ,'r' ).read() )
self.assertEqual(config['hidden_size'] ,768 )
    def test_get_file_from_repo_local( self ) -> Dict:
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir ) / 'a.txt'
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir ,'a.txt' ) ,str(filename ) )
            self.assertIsNone(get_file_from_repo(tmp_dir ,'b.txt' ) )
| 125 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class __lowercase ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
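# Hedged note: the two helpers above produce the standard BERT-style layout,
# [CLS] A [SEP] for a single sequence and [CLS] A [SEP] B [SEP] for a pair,
# with token_type_ids of 0 over the first segment (incl. its [SEP]) and 1
# over the second.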
| 199 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ) -> Dict:
    """simple docstring"""
    auxiliary_in_channels = 3_8_4
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 9_6
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 1_2, 2_4)
    elif "small" in model_name:
        embed_dim = 9_6
        depths = (2, 2, 1_8, 2)
        num_heads = (3, 6, 1_2, 2_4)
    elif "base" in model_name:
        embed_dim = 1_2_8
        depths = (2, 2, 1_8, 2)
        num_heads = (4, 8, 1_6, 3_2)
        window_size = 1_2
        auxiliary_in_channels = 5_1_2
    elif "large" in model_name:
        embed_dim = 1_9_2
        depths = (2, 2, 1_8, 2)
        num_heads = (6, 1_2, 2_4, 4_8)
        window_size = 1_2
        auxiliary_in_channels = 7_6_8
    # set label information
    num_labels = 1_5_0
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys(config ) -> Dict:
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct , old , new ) -> Any:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v(state_dict , backbone_config ) -> Any:
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" )
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def correct_unfold_reduction_order(x):
    """Reorder the columns of a patch-merging reduction weight from unfold order to HF order."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    """Inverse of `correct_unfold_reduction_order`."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    """Reorder a patch-merging norm parameter from unfold order to HF order."""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    """Inverse of `correct_unfold_norm_order`."""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation UperNet + Swin checkpoint into the HF format, verify it, and optionally push it."""
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name, param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]])
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]])
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]])
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
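    # A minimal follow-up sketch (not part of the original script): reloading the
    # converted checkpoint. The folder is a hypothetical --pytorch_dump_folder_path value.
    #
    #   from transformers import SegformerImageProcessor, UperNetForSemanticSegmentation
    #   processor = SegformerImageProcessor.from_pretrained("./upernet-swin-tiny")
    #   model = UperNetForSemanticSegmentation.from_pretrained("./upernet-swin-tiny")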
| 199 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_snake_case : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """Constructs a CLIP-style image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by the given factor."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess an image or batch of images, falling back to the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
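# A short usage sketch (not part of the original module; `cat.png` is a hypothetical
# local file, and the 224x224 output shape follows from the defaults above):
#
#   from PIL import Image
#   processor = CLIPImageProcessor()
#   batch = processor(Image.open("cat.png"), return_tensors="pt")
#   print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])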
| 22 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Any = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
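# A quick sketch of instantiating the config with its defaults (illustrative only):
#
#   config = RoFormerConfig()
#   print(config.hidden_size, config.max_position_embeddings)  # 768 1536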
| 22 | 1 |
def is_automorphic_number(number: int) -> bool:
    """Return True if `number` squared ends in `number` itself (an automorphic number)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and its square, one digit at a time.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
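# Quick sanity checks: 5**2 = 25 and 76**2 = 5776 end in 5 and 76 respectively,
# while 7**2 = 49 does not end in 7.
assert is_automorphic_number(5)
assert is_automorphic_number(76)
assert not is_automorphic_number(7)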
if __name__ == "__main__":
import doctest
doctest.testmod() | 306 |
import os
def lowercase ( _a = "matrix.txt" ) -> int:
with open(os.path.join(os.path.dirname(_a ) ,_a ) ) as in_file:
UpperCAmelCase_: str = in_file.read()
UpperCAmelCase_: Union[str, Any] = [[int(_a ) for cell in row.split("," )] for row in data.strip().splitlines()]
UpperCAmelCase_: List[Any] = [[0 for cell in row] for row in grid]
UpperCAmelCase_: Any = len(grid[0] )
UpperCAmelCase_: int = [[0 for i in range(_a )] for j in range(_a )]
UpperCAmelCase_: int = grid[0][0]
for i in range(1 ,_a ):
UpperCAmelCase_: List[Any] = grid[0][i] + dp[0][i - 1]
for i in range(1 ,_a ):
UpperCAmelCase_: Any = grid[i][0] + dp[i - 1][0]
for i in range(1 ,_a ):
for j in range(1 ,_a ):
UpperCAmelCase_: Union[str, Any] = grid[i][j] + min(dp[i - 1][j] ,dp[i][j - 1] )
return dp[-1][-1]
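def _min_path_sum(grid: list) -> int:
    # Illustrative helper (not part of the original Project Euler script): the same
    # right/down DP as `solution`, applied to an in-memory square grid.
    n = len(grid)
    dp = [row[:] for row in grid]
    for i in range(n):
        for j in range(n):
            if i or j:
                dp[i][j] += min(
                    dp[i - 1][j] if i else float("inf"),
                    dp[i][j - 1] if j else float("inf"),
                )
    return dp[-1][-1]


# For [[1, 3], [2, 4]] the cheapest path is 1 -> 2 -> 4, i.e. 7.
assert _min_path_sum([[1, 3], [2, 4]]) == 7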
if __name__ == "__main__":
print(F"""{solution() = }""") | 306 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[Any] = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
| 410 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89
def chain(number: int) -> bool:
    """Return True if the chain starting at `number` ends at 1, False if it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # Appending trailing zeros does not change the digit-square sum, so cache those too.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below `number` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
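# Worked example of single chain steps (plain digit arithmetic):
# 44 -> 4**2 + 4**2 = 32, 32 -> 9 + 4 = 13, and 85 -> 64 + 25 = 89.
assert next_number(44) == 32 and next_number(32) == 13
assert next_number(85) == 89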
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution() = }''')
| 471 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `hf_pointer` along the dotted `key` and copy `value` into the target tensor."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Map every fairseq parameter onto the corresponding HF Hubert parameter."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor parameter from fairseq into the HF model."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq Hubert weights into the transformers design."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
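    # A minimal follow-up sketch (not part of the original script): reloading the
    # converted model. The folder name is a hypothetical --pytorch_dump_folder_path value.
    #
    #   from transformers import HubertForCTC, Wav2Vec2Processor
    #   processor = Wav2Vec2Processor.from_pretrained("./hubert-converted")
    #   model = HubertForCTC.from_pretrained("./hubert-converted")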
| 361 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def lowercase_ ( _UpperCAmelCase="no" , _UpperCAmelCase = default_json_config_file , _UpperCAmelCase = False ):
"""simple docstring"""
A_ : int = Path(_UpperCAmelCase )
path.parent.mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase )
if path.exists():
print(
f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
A_ : List[Any] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
A_ : Tuple = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
A_ : Optional[Any] = torch.cuda.device_count()
A_ : Optional[Any] = num_gpus
A_ : int = False
if num_gpus > 1:
A_ : List[str] = '''MULTI_GPU'''
else:
A_ : Optional[int] = '''NO'''
elif is_xpu_available() and use_xpu:
A_ : Union[str, Any] = torch.xpu.device_count()
A_ : Dict = num_xpus
A_ : List[str] = False
if num_xpus > 1:
A_ : str = '''MULTI_XPU'''
else:
A_ : str = '''NO'''
elif is_npu_available():
A_ : Tuple = torch.npu.device_count()
A_ : Tuple = num_npus
A_ : List[str] = False
if num_npus > 1:
A_ : Any = '''MULTI_NPU'''
else:
A_ : List[Any] = '''NO'''
else:
A_ : List[Any] = 0
A_ : Dict = True
A_ : Tuple = 1
A_ : int = '''NO'''
A_ : Union[str, Any] = ClusterConfig(**_UpperCAmelCase )
config.to_json_file(_UpperCAmelCase )
return path
def default_command_parser(parser, parents):
    """Register the `accelerate config default` subcommand and its arguments."""
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    """Entry point for `accelerate config default`."""
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 361 | 1 |
"""simple docstring"""
def net_present_value(discount_rate: float, cash_flows: list) -> float:
    """Discount each cash flow by (1 + rate)**i and return the rounded sum."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
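# Worked example: at a 10% discount rate, the flows [-1000, 600, 600] give
# -1000 + 600/1.1 + 600/1.21 = 41.32 after rounding to two decimals.
assert net_present_value(0.10, [-1000, 600, 600]) == 41.32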
if __name__ == "__main__":
import doctest
doctest.testmod()
| 434 | import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session with a given provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Save the model, plus its external weights if any, to a directory."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
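# A brief usage sketch (illustrative repo id and input name; a real ONNX model
# defines its own input signature):
#
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", file_name="model.onnx")
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))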
| 520 | 0 |
class Graph:
    """A directed graph stored as an adjacency list, with recursive depth-first search."""

    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int):
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list):
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 702 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 151 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """Build (row_id, row_dict) pairs in the order the partitions would be iterated."""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 424 | import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    """Resolve an image processor class object from its class name."""
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the image processor configuration from a local folder or the hub as a dict."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead.")
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    """Generic class instantiating one of the library's image processors via `from_pretrained`."""

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.")

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration.")
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration.")
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}")

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
| 424 | 1 |
"""simple docstring"""
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : Union[str, Any] = 'philschmid/bart-large-cnn-samsum'
__lowerCAmelCase : List[str] = (
'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
'and returns a summary of the text.'
)
__lowerCAmelCase : List[Any] = 'summarizer'
__lowerCAmelCase : Optional[Any] = AutoTokenizer
__lowerCAmelCase : List[str] = AutoModelForSeqaSeqLM
__lowerCAmelCase : int = ['text']
__lowerCAmelCase : str = ['text']
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
return self.pre_processor(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , truncation=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
return self.model.generate(**_SCREAMING_SNAKE_CASE )[0]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
return self.pre_processor.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
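# Hedged usage sketch for the tool above: PipelineTool chains encode -> forward ->
# decode when the instance is called. Assumes the default checkpoint is reachable;
# the variable names and sample text are illustrative.
if __name__ == "__main__":
    summarizer = TextSummarizationTool()
    print(summarizer("The tower is 324 metres tall, about the same height as an 81-storey building."))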
| 704 |
"""simple docstring"""
def xnor_gate( input_1 : int , input_2 : int ):
    return 1 if input_1 == input_2 else 0
def test_xnor_gate( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
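    # Sanity check: for inputs in {0, 1}, XNOR is the complement of XOR, so a
    # bitwise one-liner reproduces the truth table of xnor_gate above.
    xnor_bitwise = lambda a, b: (a ^ b) ^ 1
    assert all(xnor_bitwise(a, b) == xnor_gate(a, b) for a in (0, 1) for b in (0, 1))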
| 359 | 0 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson( function : str , starting_point : complex , variable : str = "x" , precision : float = 10**-10 , multiplicity : int = 1 , ):
    '''simple docstring'''
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError('''Could not find root''' ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}''')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f'''{newton_raphson("exp(x) - 1", 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
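    # The same iteration without sympy, sketched for f(x) = x**2 - 2 with a
    # hand-written derivative; converges to sqrt(2). Names are illustrative.
    def newton_sqrt2(x: float = 1.0, eps: float = 1e-10) -> float:
        while True:
            nxt = x - (x * x - 2) / (2 * x)  # x_{n+1} = x_n - f(x_n) / f'(x_n)
            if abs(nxt - x) < eps:
                return nxt
            x = nxt
    print(f"The root of x**2 - 2 = 0 is {newton_sqrt2()}")  # ~1.4142135623730951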
| 602 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    if os.name == "nt":
        import msvcrt
        encoding = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP["""esc"""] )
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 643 | 0 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force( magnitude : float , angle : float , radian_mode : bool = False ) -> list[float]:
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium( forces : NDArray[float64] , location : NDArray[float64] , eps : float = 10**-1 ) -> bool:
    moments = cross(location , forces )
    sum_moments = sum(moments )
    return abs(sum_moments ) < eps
if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
    import doctest
    doctest.testmod()
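    # One more self-contained check, assuming the helpers above: equal and opposite
    # forces applied at the same point produce zero net moment about the origin.
    demo_forces = array([[0, -10], [0, 10]])
    demo_location = array([[1, 0], [1, 0]])
    assert in_static_equilibrium(demo_forces, demo_location)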
| 702 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
_snake_case = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
torch.manual_seed(0 )
_snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
_snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_snake_case = CLIPTextModel(lowerCAmelCase_ )
_snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_snake_case = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ):
"""simple docstring"""
_snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
_snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_snake_case = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('RGB' )
if str(lowerCAmelCase_ ).startswith('mps' ):
_snake_case = torch.manual_seed(lowerCAmelCase_ )
else:
_snake_case = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
_snake_case = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case = self.get_dummy_inputs(lowerCAmelCase_ )
_snake_case = sd_pipe(**lowerCAmelCase_ ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
_snake_case = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case = self.get_dummy_inputs(lowerCAmelCase_ )
_snake_case = 'french fries'
_snake_case = sd_pipe(**lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ )
_snake_case = output.images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
_snake_case = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case = self.get_dummy_inputs(lowerCAmelCase_ )
_snake_case = [inputs['prompt']] * 2
_snake_case = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
_snake_case = torch.from_numpy(lowerCAmelCase_ ).unsqueeze(0 ).to(lowerCAmelCase_ )
_snake_case = image / 2 + 0.5
_snake_case = image.permute(0 , 3 , 1 , 2 )
_snake_case = image.repeat(2 , 1 , 1 , 1 )
_snake_case = sd_pipe(**lowerCAmelCase_ ).images
_snake_case = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
_snake_case = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' )
_snake_case = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
_snake_case = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case = self.get_dummy_inputs(lowerCAmelCase_ )
_snake_case = sd_pipe(**lowerCAmelCase_ ).images
_snake_case = image[0, -3:, -3:, -1]
_snake_case = [round(lowerCAmelCase_ , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(lowerCAmelCase_ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
_snake_case = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
_snake_case = VaeImageProcessor(do_resize=lowerCAmelCase_ , do_normalize=lowerCAmelCase_ )
_snake_case = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase_ , input_image_type='pt' ) )[0]
_snake_case = components['vae']
_snake_case = self.get_dummy_inputs_by_type(lowerCAmelCase_ , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_snake_case = vae.encode(inputs[image_param] ).latent_dist.mode()
_snake_case = pipe(**lowerCAmelCase_ )[0]
_snake_case = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase_ , 1E-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase ):
def lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self , lowerCAmelCase_=0 ):
"""simple docstring"""
_snake_case = torch.manual_seed(lowerCAmelCase_ )
_snake_case = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
_snake_case = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_snake_case = self.get_inputs()
_snake_case = pipe(**lowerCAmelCase_ ).images
_snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
_snake_case = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
_snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_snake_case = self.get_inputs()
_snake_case = pipe(**lowerCAmelCase_ ).images
_snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
_snake_case = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
_snake_case = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_snake_case = self.get_inputs()
_snake_case = pipe(**lowerCAmelCase_ ).images
_snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
_snake_case = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = 0
def callback_fn(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
_snake_case = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_snake_case = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_snake_case = latents[0, -3:, -3:, -1]
_snake_case = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
_snake_case = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_snake_case = latents[0, -3:, -3:, -1]
_snake_case = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
_snake_case = False
_snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa )
_snake_case = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_snake_case = self.get_inputs()
pipe(**lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCamelCase ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa )
_snake_case = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_snake_case = self.get_inputs()
_snake_case = pipe(**lowerCAmelCase_ )
_snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_snake_case = inputs['image'].resize((5_04, 5_04) )
_snake_case = 'timbrooks/instruct-pix2pix'
_snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_snake_case = pipe(**lowerCAmelCase_ )
_snake_case = output.images[0]
_snake_case = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
_snake_case = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
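# Hedged end-to-end sketch of the pipeline exercised by these tests; the checkpoint
# and prompt mirror the slow tests above, everything else is illustrative.
def run_instruct_pix2pix_demo():
    pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained('timbrooks/instruct-pix2pix' , safety_checker=None )
    init_image = load_image(
        'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
    return pipe(prompt='turn him into a cyborg' , image=init_image , num_inference_steps=3 ).images[0]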
| 542 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mobilebert_fast'''] = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilebert'''] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mobilebert'''] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
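# This `__init__.py` (and the two similar records that follow) use the same
# lazy-import recipe; a minimal sketch of the moving parts, with toy module names:
# _import_structure = {"configuration_foo": ["FooConfig"]}   # submodule -> public names
# sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Attribute access such as `package.FooConfig` then imports `configuration_foo` on demand,
# keeping the top-level import cheap.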
| 431 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_falcon'''] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 431 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 155 |
import json
import sys
def format_json_to_md( input_json_file , output_md_file ) -> None:
    with open(input_json_file , encoding="""utf-8""") as f:
        results = json.load(f)
    output_md = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("""/""")[-1]
        output_md.append(F'''### Benchmark: {benchmark_file_name}''')
        title = """| metric |"""
        lines = """|--------|"""
        value = """| new / old (diff) |"""
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["""new"""]
            old_val = metric_vals.get("""old""" , None)
            dif_val = metric_vals.get("""diff""" , None)
            val_str = F''' {new_val:f}''' if isinstance(new_val , (int, float)) else """None"""
            if old_val is not None:
                val_str += F''' / {old_val:f}''' if isinstance(old_val , (int, float)) else "None"
            if dif_val is not None:
                val_str += F''' ({dif_val:f})''' if isinstance(dif_val , (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("""</details>""")
    with open(output_md_file , """w""" , encoding="""utf-8""") as f:
        f.writelines("""\n""".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
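    # Illustrative input shape (the values are made up): each benchmark maps metric
    # names to {"new": ..., "old": ..., "diff": ...}, e.g.
    # {"benchmarks/sq.json": {"load_time": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
    # renders as a "### Benchmark: sq.json" section with a "| metric | load_time |" table
    # inside a collapsible <details> block.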
| 155 | 1 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum( number : int ) -> int:
    if not isinstance(number , int ):
        raise TypeError('Parameter number must be int' )
    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0' )
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
def solution( chain_length : int = 60 , number_limit : int = 1000000 ) -> int:
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError('Parameters chain_length and number_limit must be int' )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0' )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
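    # Worked check, assuming the helpers above: 69 -> 363600 -> 1454 -> 169 -> 363601
    # -> 1454 (loop), so the chain starting at 69 has exactly 5 distinct elements.
    assert digit_factorial_sum(69) == 363600
    assert solution(chain_length=5, number_limit=70) >= 1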
| 459 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin ):
    feature_extractor_class = 'MCTCTFeatureExtractor'
    tokenizer_class = 'AutoTokenizer'
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def pad( self , *args , **kwargs ):
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop('''input_features''' , None )
        labels = kwargs.pop('''labels''' , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['''labels'''] = labels['''input_ids''']
            return input_features
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 201 | 0 |
'''simple docstring'''
def is_pangram( input_str : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(''' ''' , '''''' )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26
def is_pangram_faster( input_str : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - ord('a' )] = True
        elif char.isupper():
            flag[ord(char ) - ord('A' )] = True
    return all(flag )
def is_pangram_fastest( input_str : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit
    setup = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    print(timeit('''is_pangram()''' , setup=setup ) )
    print(timeit('''is_pangram_faster()''' , setup=setup ) )
    print(timeit('''is_pangram_fastest()''' , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
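    # Quick demonstration, assuming the functions above:
    print(is_pangram())         # True  (the default sentence uses every letter)
    print(is_pangram("hello"))  # False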
| 706 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlm-roberta-base''': 512,
'''xlm-roberta-large''': 512,
'''xlm-roberta-large-finetuned-conll02-dutch''': 512,
'''xlm-roberta-large-finetuned-conll02-spanish''': 512,
'''xlm-roberta-large-finetuned-conll03-english''': 512,
'''xlm-roberta-large-finetuned-conll03-german''': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size(self ):
        return len(self.sp_model ) + self.fairseq_offset + 1  # Add the <mask> token
    def get_vocab(self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize(self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id(self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self , tokens ):
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
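# Hedged usage sketch; "xlm-roberta-base" is the canonical checkpoint for this
# tokenizer, but the demo itself is not part of the original module.
if __name__ == "__main__":
    tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    print(tokenizer.tokenize("Hello world!"))  # SentencePiece pieces prefixed with '▁'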
| 357 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig ):
    model_type ="""gptj"""
    attribute_map ={
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self,vocab_size=50400,n_positions=2048,n_embd=4096,n_layer=28,n_head=16,rotary_dim=64,n_inner=None,activation_function="gelu_new",resid_pdrop=0.0,embd_pdrop=0.0,attn_pdrop=0.0,layer_norm_epsilon=1e-5,initializer_range=0.02,use_cache=True,bos_token_id=50256,eos_token_id=50256,tie_word_embeddings=False,**kwargs,):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id,eos_token_id=eos_token_id,tie_word_embeddings=tie_word_embeddings,**kwargs )
class GPTJOnnxConfig(OnnxConfigWithPast ):
    def __init__( self,config,task = "default",patching_specs = None,use_past = False,):
        '''simple docstring'''
        super().__init__(config,task=task,patching_specs=patching_specs,use_past=use_past )
        if not getattr(self._config,"""pad_token_id""",None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ):
        '''simple docstring'''
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs,direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_layers( self ):
        '''simple docstring'''
        return self._config.n_layer
    @property
    def num_attention_heads( self ):
        '''simple docstring'''
        return self._config.n_head
    def generate_dummy_inputs( self,tokenizer,batch_size = -1,seq_length = -1,is_pair = False,framework = None,):
        '''simple docstring'''
        common_inputs = super(GPTJOnnxConfig,self ).generate_dummy_inputs(
            tokenizer,batch_size=batch_size,seq_length=seq_length,is_pair=is_pair,framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
                batch , seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch,past_key_values_length,dtype=mask_dtype )],dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ):
        '''simple docstring'''
        return 13
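# Cheap sanity check of the classes above; constructing configs needs no download,
# and the printed values depend only on the defaults.
if __name__ == "__main__":
    onnx_config = GPTJOnnxConfig(GPTJConfig() , use_past=True )
    print(onnx_config.num_layers , onnx_config.num_attention_heads )  # 28 16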
| 689 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    parser = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""" )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
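# The dispatch contract assumed above, sketched as comments; the class and method
# names below are illustrative, not a guaranteed diffusers API.
# class MyCommand(BaseDiffusersCLICommand):
#     @staticmethod
#     def register_subcommand(parser):
#         sub = parser.add_parser("my-command")
#         sub.set_defaults(func=lambda args: MyCommand())
#     def run(self):
#         ...  # do the work; `args.func(args)` above constructs and `run()` executes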
| 689 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    pad_to_max_length: bool = field(
        default=True , metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        } , )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    language: str = field(
        default=None , metadata={"""help""": """Evaluation language. Also train language if `train_language` is set to None."""} )
    train_language: Optional[str] = field(
        default=None , metadata={"""help""": """Train language if it is different from the evaluation language."""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    do_lower_case: Optional[bool] = field(
        default=False , metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli" , model_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
__a = load_dataset(
"xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
__a = load_dataset(
"xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__a = train_dataset.features["label"].names
if training_args.do_eval:
__a = load_dataset(
"xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__a = eval_dataset.features["label"].names
if training_args.do_predict:
__a = load_dataset(
"xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__a = predict_dataset.features["label"].names
# Labels
__a = len(_A )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_A , idalabel={str(_A ): label for i, label in enumerate(_A )} , labelaid={label: i for i, label in enumerate(_A )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples ):
        # Tokenize the texts
        return tokenizer(
            examples["premise"] , examples["hypothesis"] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )
if training_args.do_train:
if data_args.max_train_samples is not None:
__a = min(len(_A ) , data_args.max_train_samples )
__a = train_dataset.select(range(_A ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
__a = train_dataset.map(
_A , batched=_A , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(_A ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__a = min(len(_A ) , data_args.max_eval_samples )
__a = eval_dataset.select(range(_A ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
__a = eval_dataset.map(
_A , batched=_A , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
__a = min(len(_A ) , data_args.max_predict_samples )
__a = predict_dataset.select(range(_A ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
__a = predict_dataset.map(
_A , batched=_A , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
    metric = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return metric.compute(predictions=preds , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__a = default_data_collator
elif training_args.fpaa:
__a = DataCollatorWithPadding(_A , pad_to_multiple_of=8 )
else:
__a = None
# Initialize our Trainer
__a = Trainer(
model=_A , args=_A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_A , tokenizer=_A , data_collator=_A , )
# Training
if training_args.do_train:
__a = None
if training_args.resume_from_checkpoint is not None:
__a = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a = last_checkpoint
__a = trainer.train(resume_from_checkpoint=_A )
__a = train_result.metrics
__a = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_A )
)
__a = min(_A , len(_A ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , _A )
trainer.save_metrics("train" , _A )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__a = trainer.evaluate(eval_dataset=_A )
__a = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_A )
__a = min(_A , len(_A ) )
trainer.log_metrics("eval" , _A )
trainer.save_metrics("eval" , _A )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
__a , __a , __a = trainer.predict(_A , metric_key_prefix="predict" )
__a = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_A )
)
__a = min(_A , len(_A ) )
trainer.log_metrics("predict" , _A )
trainer.save_metrics("predict" , _A )
__a = np.argmax(_A , axis=1 )
__a = os.path.join(training_args.output_dir , "predictions.txt" )
if trainer.is_world_process_zero():
with open(_A , "w" ) as writer:
writer.write("index\tprediction\n" )
for index, item in enumerate(_A ):
__a = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
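# A minimal launch sketch (flag names follow the usual run_xnli.py example
# arguments; treat the exact values below as illustrative, not prescriptive):
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de \
#       --train_language en \
#       --do_train --do_eval \
#       --max_seq_length 128 \
#       --output_dir /tmp/debug_xnli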
| 712 | class TrieNode:
    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
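# Quick illustrative check (ours, mirroring the tests above): deleting a word
# only clears its leaf flag when the node still has children, so a longer word
# sharing the prefix survives the `len(curr.nodes) == 0` pruning guard.
def _prefix_demo() -> None:
    root = TrieNode()
    root.insert_many(["can", "canal"])
    root.delete("can")
    assert not root.find("can") and root.find("canal")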
| 525 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
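# Illustrative note (not part of the original file): with the _LazyModule hook
# above, importing this package is cheap; e.g.
#
#   import transformers.models.speech_to_text as s2t  # nothing heavy imported yet
#   cfg_cls = s2t.Speech2TextConfig                   # first attribute access
#                                                     # loads the real submodule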
| 203 |
def solution(n: int = 1000) -> int:
    """Returns the sum of 2 * a * ((a - 1) // 2) for a in range(3, n + 1)."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
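# Worked check for small n (hand arithmetic): the a = 3 term is
# 2 * 3 * ((3 - 1) // 2) = 6 and the a = 4 term is 2 * 4 * ((4 - 1) // 2) = 8,
# so solution(4) == 6 + 8 == 14.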
| 203 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
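# Note (illustrative, not from the original file): this suite is gated by the
# TEST_SAGEMAKER env var checked above, so a typical local invocation is
#   TEST_SAGEMAKER=True python -m pytest -s tests/sagemaker
# and it needs valid AWS credentials plus a configured SageMaker execution role.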
| 714 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` occurs in `document` (case-insensitive)."""
    # strip all punctuation and newlines, then word-tokenize
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return round(log10(n / df), 3), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
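# Worked example under the definitions above (a tiny two-document corpus; the
# variable name is ours):
#
#   corpus_example = "the cat sat\nthe dog sat on the mat"
#   term_frequency("the", "the dog sat on the mat")  -> 2
#   document_frequency("the", corpus_example)        -> (2, 2), i.e. df = 2, n = 2
#   inverse_document_frequency(2, 2)                 -> round(log10(2 / 2), 3) == 0.0
#   tf_idf(2, 0.0)                                   -> 0.0
#   (a term present in every document carries no weight)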
| 688 | 0 |
from math import pi


def arc_length(angle: int, radius: int) -> float:
    """Length of a circular arc: 2 * pi * radius * (angle / 360)."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
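# Worked example: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.708.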
| 456 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in the `_import_structure` half and in the
    `TYPE_CHECKING` half.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Check all inits and raise an error if the two halves of one of them do not define the same objects."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
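# Illustrative run of analyze_results on hand-built inputs (hypothetical names):
#
#   import_dict = {"none": ["FooConfig"], "torch": ["FooModel"]}
#   type_hints = {"none": ["FooConfig"], "torch": ["FooModel", "FooPreTrainedModel"]}
#   analyze_results(import_dict, type_hints)
#   -> ["Differences for torch backend:",
#       "  FooPreTrainedModel in TYPE_HINT but not in _import_structure."]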
| 456 | 1 |
from __future__ import annotations
class BoyerMooreSearch:
    """
    Searches `pattern` in `text` with the bad-character heuristic and collects
    the index of every full match.
    """

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """
        Finds the index of the mismatched character in the text when compared
        with the pattern, or -1 if the pattern matches at `current_pos`.
        """
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
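# Optional sanity check (a sketch of ours, not in the original): the heuristic's
# positions should agree with a naive scan over every alignment.
naive = [i for i in range(len(text) - len(pattern) + 1) if text.startswith(pattern, i)]
assert positions == naive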
| 717 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 52 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : Optional[Any]="</s>" , __SCREAMING_SNAKE_CASE : str="<unk>" , __SCREAMING_SNAKE_CASE : str="<sep>" , __SCREAMING_SNAKE_CASE : Any="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<cls>" , __SCREAMING_SNAKE_CASE : Optional[int]="<mask>" , __SCREAMING_SNAKE_CASE : Tuple=["<eop>", "<eod>"] , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> None:
__UpperCAmelCase =AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
__UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =3
__UpperCAmelCase =do_lower_case
__UpperCAmelCase =remove_space
__UpperCAmelCase =keep_accents
__UpperCAmelCase =vocab_file
__UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
__UpperCAmelCase =jieba
__UpperCAmelCase =str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _a ( self : Optional[Any] ) -> List[Any]:
return len(self.sp_model )
def _a ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> List[str]:
__UpperCAmelCase =self.__dict__.copy()
__UpperCAmelCase =None
return state
def __setstate__( self : str , __SCREAMING_SNAKE_CASE : Tuple ) -> str:
__UpperCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__UpperCAmelCase ={}
__UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any ) -> Tuple:
if self.remove_space:
__UpperCAmelCase =""" """.join(inputs.strip().split() )
else:
__UpperCAmelCase =inputs
__UpperCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
__UpperCAmelCase =unicodedata.normalize("""NFKD""" , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] )
if self.do_lower_case:
__UpperCAmelCase =outputs.lower()
return outputs
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
__UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =[]
for piece in pieces:
if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
__UpperCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__UpperCAmelCase =cur_pieces[1:]
else:
__UpperCAmelCase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__SCREAMING_SNAKE_CASE )
else:
new_pieces.append(__SCREAMING_SNAKE_CASE )
return new_pieces
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def _a ( self : str , __SCREAMING_SNAKE_CASE : int ) -> Any:
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
__UpperCAmelCase ="""""".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase =[self.sep_token_id]
__UpperCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _a ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1]
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1]
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase =[self.sep_token_id]
__UpperCAmelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase =os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi:
__UpperCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _a ( self : List[Any] , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Dict ) -> Dict:
__UpperCAmelCase =super()._decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 68 |
# Imports
import numpy as np
class IndexCalculation:
    """
    Calculates vegetation indices from the red, green, blue, red-edge and
    near-infrared matrices of a multispectral image.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
def SCREAMING_SNAKE_CASE( self :Optional[Any] , lowerCAmelCase__ :Dict="" , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :int=None , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Union[str, Any]=None ) ->Optional[Any]:
self.set_matricies(red=lowerCAmelCase__ , green=lowerCAmelCase__ , blue=lowerCAmelCase__ , red_edge=lowerCAmelCase__ , nir=lowerCAmelCase__ )
lowercase = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
def SCREAMING_SNAKE_CASE( self :Any ) ->int:
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def SCREAMING_SNAKE_CASE( self :List[str] ) ->int:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def SCREAMING_SNAKE_CASE( self :int ) ->Tuple:
return self.nir * (self.red / (self.green**2))
def SCREAMING_SNAKE_CASE( self :List[str] ) ->Dict:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def SCREAMING_SNAKE_CASE( self :str ) ->Optional[int]:
return (self.nir - self.red) / (self.nir + self.red)
def SCREAMING_SNAKE_CASE( self :str ) ->int:
return (self.nir - self.blue) / (self.nir + self.blue)
def SCREAMING_SNAKE_CASE( self :List[str] ) ->List[str]:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def SCREAMING_SNAKE_CASE( self :Optional[int] ) ->str:
return (self.nir - self.green) / (self.nir + self.green)
def SCREAMING_SNAKE_CASE( self :int ) ->Any:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def SCREAMING_SNAKE_CASE( self :Union[str, Any] ) ->List[Any]:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def SCREAMING_SNAKE_CASE( self :Optional[Any] ) ->Union[str, Any]:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def SCREAMING_SNAKE_CASE( self :Any ) ->Union[str, Any]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def SCREAMING_SNAKE_CASE( self :Tuple , lowerCAmelCase__ :Union[str, Any]=0.08 , lowerCAmelCase__ :Dict=1.22 , lowerCAmelCase__ :Tuple=0.03 ) ->Tuple:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def SCREAMING_SNAKE_CASE( self :Dict ) ->Any:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def SCREAMING_SNAKE_CASE( self :Optional[Any] ) ->int:
return (self.nir / self.green) - 1
def SCREAMING_SNAKE_CASE( self :int ) ->List[str]:
return (self.nir / self.redEdge) - 1
def SCREAMING_SNAKE_CASE( self :Union[str, Any] ) ->List[Any]:
return (self.red - self.blue) / self.red
def SCREAMING_SNAKE_CASE( self :Any ) ->int:
lowercase = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def SCREAMING_SNAKE_CASE( self :List[str] ) ->Optional[Any]:
return self.nir - self.green
def SCREAMING_SNAKE_CASE( self :str ) ->int:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def SCREAMING_SNAKE_CASE( self :List[str] ) ->Optional[Any]:
lowercase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def SCREAMING_SNAKE_CASE( self :Union[str, Any] , lowerCAmelCase__ :Tuple=0.16 ) ->Any:
return (self.nir - self.green) / (self.nir + self.green + y)
def SCREAMING_SNAKE_CASE( self :Dict , lowerCAmelCase__ :Any=0.5 ) ->str:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def SCREAMING_SNAKE_CASE( self :int ) ->List[str]:
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def SCREAMING_SNAKE_CASE( self :Any , lowerCAmelCase__ :str=None , lowerCAmelCase__ :Tuple=None ) ->Dict:
return (self.nir - b) / (a * self.red)
def SCREAMING_SNAKE_CASE( self :Optional[Any] ) ->Dict:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def SCREAMING_SNAKE_CASE( self :str ) ->List[Any]:
return (self.red + self.green + self.blue) / 30.5
def SCREAMING_SNAKE_CASE( self :Tuple ) ->Optional[Any]:
return self.nir / self.red
def SCREAMING_SNAKE_CASE( self :Optional[int] ) ->Dict:
return (self.rvi() - 1) / (self.rvi() + 1)
def SCREAMING_SNAKE_CASE( self :Optional[Any] ) ->Dict:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def SCREAMING_SNAKE_CASE( self :str ) ->int:
return self.green / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE( self :Any ) ->Any:
return self.nir / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE( self :int ) ->Tuple:
return self.red / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE( self :List[Any] ) ->List[Any]:
return (self.green - self.red) / (self.green + self.red)
def SCREAMING_SNAKE_CASE( self :List[str] ) ->str:
return (self.red - self.green) / (self.red + self.green)
def SCREAMING_SNAKE_CASE( self :Union[str, Any] ) ->List[str]:
lowercase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowercase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def SCREAMING_SNAKE_CASE( self :List[str] ) ->Any:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def SCREAMING_SNAKE_CASE( self :List[str] ) ->List[Any]:
return self.nir / self.red
def SCREAMING_SNAKE_CASE( self :Any ) ->List[str]:
return (self.ndvi() + 0.5) ** (1 / 2)
def SCREAMING_SNAKE_CASE( self :Optional[int] ) ->Union[str, Any]:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 441 | 0 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCAmelCase : Any = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split('.' ):
UpperCamelCase = getattr(lowercase , lowercase )
if weight_type is not None:
UpperCamelCase = getattr(lowercase , lowercase ).shape
else:
UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def A ( lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = fairseq_model.state_dict()
UpperCamelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(lowercase )[0].split('.' )[-2]
UpperCamelCase = mapped_key.replace('*' , lowercase )
if "weight_g" in name:
UpperCamelCase = 'weight_g'
elif "weight_v" in name:
UpperCamelCase = 'weight_v'
elif "bias" in name:
UpperCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase = 'weight'
else:
UpperCamelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = full_name.split('conv_layers.' )[-1]
UpperCamelCase = name.split('.' )
UpperCamelCase = int(items[0] )
UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> List[Any]:
'''simple docstring'''
if config_path is not None:
UpperCamelCase = UniSpeechSatConfig.from_pretrained(lowercase )
else:
UpperCamelCase = UniSpeechSatConfig()
UpperCamelCase = ''
if is_finetuned:
UpperCamelCase = UniSpeechSatForCTC(lowercase )
else:
UpperCamelCase = UniSpeechSatForPreTraining(lowercase )
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
UpperCamelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase : Dict = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 3 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase = tempfile.mktemp()
with open(A_ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A_ )
UpperCamelCase = AlbertTokenizer.from_pretrained(A_ )
finally:
os.remove(A_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A_ )
UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowercase ( unittest.TestCase ):
__lowercase : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __UpperCamelCase ( cls ) -> Tuple:
"""simple docstring"""
UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def __UpperCamelCase ( cls ) -> Optional[int]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A_ , repo_id='test-tokenizer' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = CustomTokenizer(A_ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizerFast.from_pretrained(A_ )
bert_tokenizer.save_pretrained(A_ )
UpperCamelCase = CustomTokenizerFast.from_pretrained(A_ )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
UpperCamelCase = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' , use_fast=A_ , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class TrieTest ( unittest.TestCase ):
    def test_trie( self ):
        trie = Trie()
        trie.add('Hello 友達' )
        self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
        trie.add('Hello' )
        self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
    def test_trie_split( self ):
        trie = Trie()
        self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
        trie.add('[CLS]' )
        trie.add('extra_id_1' )
        trie.add('extra_id_100' )
        self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
    def test_trie_single( self ):
        trie = Trie()
        trie.add('A' )
        self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
        self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
    def test_trie_final( self ):
        trie = Trie()
        trie.add('TOKEN]' )
        trie.add('[SPECIAL_TOKEN]' )
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
    def test_trie_subtokens( self ):
        trie = Trie()
        trie.add('A' )
        trie.add('P' )
        trie.add('[SPECIAL_TOKEN]' )
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
    def test_trie_suffix_tokens( self ):
        trie = Trie()
        trie.add('AB' )
        trie.add('B' )
        trie.add('C' )
        self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
    def test_trie_skip( self ):
        trie = Trie()
        trie.add('ABC' )
        trie.add('B' )
        trie.add('CD' )
        self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
    def test_cut_text_hardening( self ):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts , ['AB', 'C'] )
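# Illustrative usage (added sketch, not part of the original test file): the Trie
# above backs added-token matching during tokenization; a typical standalone call:
#
#     trie = Trie()
#     trie.add('[CLS]')
#     trie.add('extra_id_1')
#     trie.split('[CLS] This is a extra_id_1')  # ['[CLS]', ' This is a ', 'extra_id_1']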
| 3 | 1 |
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error" )
    if num > 171.5:
        raise OverflowError("math range error" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
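# Worked values (added for illustration; they follow from the recursion above):
#     gamma(0.5) == sqrt(pi)                      # ≈ 1.7724538509055159
#     gamma(4)   == 3 * 2 * 1                     # == 6.0
#     gamma(3.5) == 2.5 * 1.5 * 0.5 * sqrt(pi)    # ≈ 3.3233509704478426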
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("""Gamma of: """))
        print(F'gamma({num}) = {gamma(num)}')
print("""\nEnter 0 to exit...""")
| 413 |
import numpy as np
def exponential_linear_unit(vector: np.ndarray , alpha: float ) -> np.ndarray:
    return np.where(vector > 0 , vector , alpha * (np.exp(vector ) - 1) )
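# Quick check (added sketch): positive inputs pass through unchanged, negative
# inputs are squashed by alpha * (exp(x) - 1).
#     exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), 0.3)
#     # -> array([ 2.3       ,  0.6       , -0.25939942, -0.29328877])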
if __name__ == "__main__":
import doctest
doctest.testmod()
| 413 | 1 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<unk>' )
        self.assertEqual(vocab_keys[1] , '<s>' )
        self.assertEqual(vocab_keys[-1] , '<eod>' )
        self.assertEqual(len(vocab_keys ) , 10_06 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
    def test_full_tokenizer( self ):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [2_85, 46, 10, 1_70, 3_82] )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_tokenizer_lower( self ):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , do_lower_case=True )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
    def test_tokenizer_no_lower( self ):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , do_lower_case=False )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
@slow
    def test_sequence_builders( self ):
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
    def test_tokenizer_integration( self ):
        # fmt: off
snake_case = {'''input_ids''': [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: 
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
| 701 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str] ) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(R'#.*' , '' , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = '\n'.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('utf-8' )
    return sha256(full_bytes ).hexdigest()
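# Behavior note (added): comments and blank lines do not affect the digest, e.g.
#     _hash_python_lines(['x = 1', '# a comment', '', 'y = 2']) == _hash_python_lines(['x = 1', 'y = 2'])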
# get importable module names and hash for caching
_lowercase = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_lowercase = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_lowercase = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_lowercase = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
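# Example lookup (added sketch): a ".tsv" data file maps to the csv builder with a
# tab separator, i.e. _EXTENSION_TO_MODULE['.tsv'] == ('csv', {'sep': '\t'}).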
| 44 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str , string2: str ) -> str | Literal[False]:
    lista = list(string1 )
    listb = list(string2 )
    count = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = """_"""
    if count > 1:
        return False
    else:
        return "".join(lista )
def check(binary: list[str] ) -> list[str]:
    pi = []
    while True:
        checka = ["""$"""] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                k = compare_string(binary[i] , binary[j] )
                if k is False:
                    checka[i] = """*"""
                    checka[j] = """*"""
                    temp.append("""X""" )
        for i in range(len(binary ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary(no_of_variable: int , minterms: Sequence[float] ) -> list[str]:
    temp = []
    for minterm in minterms:
        string = """"""
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
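# Worked example (added): bits are built least-significant digit first, so with
# integer minterms decimal_to_binary(3, [5]) -> ['101']; with the float minterms
# used by main() below, decimal_to_binary(3, [5.0]) -> ['1.00.01.0'].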
def is_for_table(stringa: str , stringb: str , count: int ) -> bool:
    lista = list(stringa )
    listb = list(stringb )
    count_n = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]] , prime_implicants: list[str] ) -> list[str]:
    temp = []
    select = [0] * len(prime_implicants )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str] , binary: list[str] ) -> list[list[int]]:
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("""_""" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart
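# End-to-end sketch (added; mirrors main() below with hardcoded inputs):
#     binary = decimal_to_binary(3, [1, 5])                     # -> ['001', '101'] for int minterms
#     prime_implicants = check(binary)                          # iteratively combines terms
#     chart = prime_implicant_chart(prime_implicants, binary)   # 0/1 coverage table
#     essential = selection(chart, prime_implicants)            # essential prime implicants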
def main() -> None:
    no_of_variable = int(input("""Enter the no. of variables\n""" ) )
    minterms = [
        float(x )
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )
    prime_implicants = check(binary )
    print("""Prime Implicants are:""" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )
    essential_prime_implicants = selection(chart , prime_implicants )
    print("""Essential Prime Implicants are:""" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 180 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = """informer"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }
    def __init__(self , prediction_length = None , context_length = None , distribution_output = "student_t" , loss = "nll" , input_size = 1 , lags_sequence = None , scaling = "mean" , num_dynamic_real_features = 0 , num_static_real_features = 0 , num_static_categorical_features = 0 , num_time_features = 0 , cardinality = None , embedding_dimension = None , d_model = 64 , encoder_ffn_dim = 32 , decoder_ffn_dim = 32 , encoder_attention_heads = 2 , decoder_attention_heads = 2 , encoder_layers = 2 , decoder_layers = 2 , is_encoder_decoder = True , activation_function = "gelu" , dropout = 0.05 , attention_dropout = 0.1 , activation_dropout = 0.1 , encoder_layerdrop = 0.1 , decoder_layerdrop = 0.1 , num_parallel_samples = 100 , init_std = 0.02 , use_cache=True , attention_type = "prob" , sampling_factor = 5 , distil = True , **kwargs , ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`""" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features(self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
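# Usage sketch (added, not part of the original file): a minimal configuration for a
# univariate series, relying on the defaults above.
#
#     config = InformerConfig(prediction_length=24, context_length=48)
#     assert config.d_model == 64 and config.attention_type == 'prob'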
| 180 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_squeezebert_fast'''] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_squeezebert'''] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
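# Behavior note (added): with the _LazyModule registration above, heavy submodules
# are imported only on first attribute access, e.g. (path assumed from the package layout)
#     from transformers.models.squeezebert import SqueezeBertTokenizer
# resolves tokenization_squeezebert lazily instead of at package import time.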
| 579 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
_UpperCAmelCase = NystromformerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
_UpperCAmelCase = model(snake_case_ , token_type_ids=snake_case_ )
_UpperCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
_UpperCAmelCase = NystromformerForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
_UpperCAmelCase = NystromformerForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = NystromformerForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = NystromformerForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = NystromformerForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
A__ : List[Any] = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
A__ : Dict = (
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : str = False
A__ : Union[str, Any] = False
    def setUp( self ):
        self.model_tester = NystromformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NystromformerConfig , hidden_size=37 )
def __A ( self ) -> int:
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __A ( self ) -> str:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*snake_case_ )
def __A ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def __A ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def __A ( self ) -> Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def __A ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def __A ( self ) -> Dict:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def __A ( self ) -> Dict:
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = NystromformerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Any:
_UpperCAmelCase = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" )
_UpperCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
_UpperCAmelCase = model(snake_case_ )[0]
_UpperCAmelCase = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , snake_case_ )
_UpperCAmelCase = torch.tensor(
[[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1e-4 ) )
@slow
def __A ( self ) -> Any:
_UpperCAmelCase = "the [MASK] of Belgium is Brussels"
_UpperCAmelCase = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" )
_UpperCAmelCase = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" )
_UpperCAmelCase = tokenizer(snake_case_ , return_tensors="pt" )
with torch.no_grad():
_UpperCAmelCase = model(encoding.input_ids ).logits
_UpperCAmelCase = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case_ ) , "capital" )
| 579 | 1 |
"""simple docstring"""
def naive_cut_rod_recursive(n: int , prices: list ):
    """simple docstring"""
    _enforce_args(n , prices )
    if n == 0:
        return 0
    max_revue = float('-inf' )
    for i in range(1 , n + 1 ):
        max_revue = max(
            max_revue , prices[i - 1] + naive_cut_rod_recursive(n - i , prices ) )
    return max_revue
def top_down_cut_rod(n: int , prices: list ):
    """simple docstring"""
    _enforce_args(n , prices )
    max_rev = [float('-inf' ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n , prices , max_rev )
def _top_down_cut_rod_recursive(n: int , prices: list , max_rev: list ):
    """simple docstring"""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('-inf' )
        for i in range(1 , n + 1 ):
            max_revenue = max(
                max_revenue , prices[i - 1] + _top_down_cut_rod_recursive(n - i , prices , max_rev ) , )
        max_rev[n] = max_revenue
        return max_rev[n]
def bottom_up_cut_rod(n: int , prices: list ):
    """simple docstring"""
    _enforce_args(n , prices )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('-inf' ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1 , n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1 , i + 1 ):
            max_revenue_i = max(max_revenue_i , prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]
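# Worked example (added): with the classic CLRS price table [1, 5, 8, 9], the best
# revenue for a rod of length 4 is two pieces of length 2 (5 + 5):
#     assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10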
def _enforce_args(n: int , prices: list ):
    """simple docstring"""
    if n < 0:
        msg = F"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg )
    if n > len(prices ):
        msg = (
            'Each integral piece of rod must have a corresponding price. '
            F"Got n = {n} but length of prices = {len(prices )}"
        )
        raise ValueError(msg )
def main():
    """simple docstring"""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n , prices )
    max_rev_bottom_up = bottom_up_cut_rod(n , prices )
    max_rev_naive = naive_cut_rod_recursive(n , prices )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 93 |
'''simple docstring'''
def is_ip_va_address_valid(ip_va_address: str ) -> bool:
    """simple docstring"""
    octets = [int(i ) for i in ip_va_address.split('''.''' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 255 for octet in octets )
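# Examples (added): is_ip_va_address_valid('192.168.0.23') -> True;
# '1.2.333.4' -> False (octet out of range); '1.2.-3.4' -> False, because
# '-3'.isdigit() is False and only three octets survive the filter.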
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 460 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class WavaVecaFeatureExtractionTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=1_6000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
class WavaVecaFeatureExtractionTest ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WavaVecaFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , input_vector ):
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1e-3 ) )
def _lowerCAmelCase ( self ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
snake_case_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case_ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ : Dict = [np.asarray(_SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test not batched input
snake_case_ : str = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
snake_case_ : Dict = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test batched
snake_case_ : str = feat_extract(_SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
snake_case_ : str = feat_extract(_SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
snake_case_ : Optional[int] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
snake_case_ : str = np.asarray(_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = feat_extract(_SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
snake_case_ : Any = feat_extract(_SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
snake_case_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ : List[Any] = ["longest", "max_length", "do_not_pad"]
snake_case_ : int = [None, 1600, None]
for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ : Any = feat_extract(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors="np" )
snake_case_ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _lowerCAmelCase ( self ) -> Optional[int]:
snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : Any = range(800 , 1400 , 200 )
snake_case_ : Tuple = [floats_list((1, x) )[0] for x in lengths]
snake_case_ : Optional[Any] = ["longest", "max_length", "do_not_pad"]
snake_case_ : Optional[Any] = [None, 1600, None]
for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ : Optional[int] = feat_extract(_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE )
snake_case_ : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ : str = feat_extract(
_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=1000 , padding="max_length" , return_tensors="np" )
snake_case_ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ : Any = feat_extract(
_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=1000 , padding="longest" , return_tensors="np" )
snake_case_ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
snake_case_ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ : Optional[int] = feat_extract(
_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=2000 , padding="longest" , return_tensors="np" )
snake_case_ : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def _lowerCAmelCase ( self ) -> Any:
import torch
snake_case_ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : List[Any] = np.random.rand(100 ).astype(np.floataa )
snake_case_ : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case_ : List[Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
snake_case_ : str = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def _lowerCAmelCase ( self ) -> Optional[int]:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
snake_case_ : Dict = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == "layer" )
| 703 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowercase : Dict = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler ( ArgumentHandler ):
    '''
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    '''
    def _parse_labels( self , labels ):
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split(',' ) if label.strip()]
        return labels
    def __call__( self , sequences , labels , hypothesis_template ):
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError('You must include at least one label and at least one sequence.' )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    "The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline ( ChunkPipeline ):
    '''
    NLI-based zero-shot classification pipeline using a model trained on NLI (natural language inference) tasks.
    '''
    def __init__( self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs ):
        self._args_parser = args_parser
        super().__init__(*args , **kwargs )
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
    @property
    def entailment_id( self ):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith('entail' ):
                return ind
        return -1
    def _parse_and_tokenize( self , sequence_pairs , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`" )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=truncation , )
        except Exception as e:
            if "too short" in str(e ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs
    def _sanitize_parameters( self , **kwargs ):
        if kwargs.get("multi_class" , None ) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers." )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__( self , sequences , *args , **kwargs , ):
        if len(args ) == 0:
            pass
        elif len(args ) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f'''Unable to understand extra arguments {args}''' )
        return super().__call__(sequences , **kwargs )
    def preprocess( self , inputs , candidate_labels=None , hypothesis_template="This example is {}." ):
        sequence_pairs, sequences = self._args_parser(inputs , candidate_labels , hypothesis_template )
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels , sequence_pairs ) ):
            model_input = self._parse_and_tokenize([sequence_pair] )
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels ) - 1,
                **model_input,
            }
    def _forward( self , inputs ):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs )
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess( self , model_outputs , multi_label=False ):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 , keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 , keepdims=True )
        top_inds = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
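# Usage sketch (added; standard pipeline invocation, model choice left to defaults):
#     from transformers import pipeline
#     classifier = pipeline('zero-shot-classification')
#     classifier('I love hiking in the Alps', candidate_labels=['travel', 'cooking'])
#     # -> {'sequence': ..., 'labels': ['travel', 'cooking'], 'scores': [...]}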
| 114 | 0 |
"""simple docstring"""
def is_isogram(string: str ) -> bool:
    """simple docstring"""
    if not all(x.isalpha() for x in string ):
        raise ValueError("String must only contain alphabetic characters." )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
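# Examples (added): is_isogram('Uncopyrightable') -> True;
# is_isogram('allowance') -> False; the lowercasing makes the check
# case-insensitive, so is_isogram('Dded') -> False as well.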
if __name__ == "__main__":
    input_str = input('''Enter a string ''').strip()
    isogram = is_isogram(input_str)
print(f'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 657 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification ( TaskTemplate ):
    """simple docstring"""
    task: str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'image': Image()} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    image_column: str = "image"
    label_column: str = "labels"
    def align_with_features( self , features ):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(F'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template
@property
    def column_mapping( self ) -> Dict[str, str]:
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
| 279 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor ( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BlipImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
def __call__( self : List[Any] , __lowerCAmelCase : ImageInput = None , __lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : Union[str, Any] , ):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
__snake_case = self.tokenizer
__snake_case = self.tokenizer(
text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
return text_encoding
# add pixel_values
__snake_case = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase )
if text is not None:
__snake_case = self.tokenizer(
text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
else:
__snake_case = None
if text_encoding is not None:
encoding_image_processor.update(__lowerCAmelCase )
return encoding_image_processor
def lowercase__ ( self : Dict , *__lowerCAmelCase : int , **__lowerCAmelCase : Optional[Any] ):
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def lowercase__ ( self : List[str] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Optional[int] ):
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def lowercase__ ( self : List[Any] ):
__snake_case = self.tokenizer.model_input_names
__snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
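# Minimal usage sketch for the processor above. The checkpoint and image URL
# are illustrative; any BLIP checkpoint with a matching processor config works.
import requests
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# image + text are merged into a single BatchEncoding
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']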
| 704 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
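# Quick illustration of the behaviour pinned down by the offset tests above:
# with a byte-level BPE, `add_prefix_space` controls whether the first word
# receives the leading-space marker "Ġ", and `trim_offsets` (default True)
# excludes that space from the offset mapping. Requires the `roberta-base`
# checkpoint.
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(tok.convert_ids_to_tokens(enc["input_ids"]))  # ['Ġhello', 'Ġhello']
print(enc["offset_mapping"])  # [(0, 5), (6, 11)] with trim_offsets=True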
| 427 | 0 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)


st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n  <head>\n    <style>\n      .img-container {\n        padding-left: 90px;\n        padding-right: 90px;\n        padding-top: 50px;\n        padding-bottom: 50px;\n        background-color: #f0f3f9;\n      }\n    </style>\n  </head>\n  <body>\n    <span class=\"img-container\"> <!-- Inline parent element -->\n      %s\n    </span>\n  </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n    "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder's output probabilities.\n    "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
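# The dense retrieval path above reduces to max-inner-product search over
# question/passage embeddings. A self-contained toy version with random
# vectors (sizes are illustrative, not those of the real wiki40b index):
import faiss
import numpy as np

dim, n_passages = 128, 1000
passage_reps = np.random.rand(n_passages, dim).astype("float32")
index = faiss.IndexFlatIP(dim)  # exact inner-product index
index.add(passage_reps)

question_rep = np.random.rand(1, dim).astype("float32")
scores, ids = index.search(question_rep, 10)  # top-10 passage ids and scores
print(ids[0])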
| 89 |
# Lint as: python3
"""Utilities for file names."""

import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
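# Expected behaviour of the helpers above (outputs follow directly from the
# regexes and the shard formatting logic):
print(camelcase_to_snakecase("SomeDatasetName"))  # some_dataset_name
print(snakecase_to_camelcase("some_dataset_name"))  # SomeDatasetName
print(filename_prefix_for_split("Cifar10", "train"))  # cifar10-train
print(filenames_for_dataset_split("/data", "Cifar10", "train", filetype_suffix="arrow", shard_lengths=[100, 100]))
# ['/data/cifar10-train-00000-of-00002.arrow', '/data/cifar10-train-00001-of-00002.arrow']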
| 157 | 0 |
import json
import multiprocessing
import os
import re
from collections import defaultdict

import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm

import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList


EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; multiple copies of the same prompt are sent sequentially."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
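# What `remove_last_block` does to a raw completion: the text from the last
# stop marker in EOF_STRINGS onwards is stripped, keeping only the candidate
# function body. The toy completion below is made up for the illustration.
completion = "    return a + b\n\nprint(add(1, 2))"
print(repr(remove_last_block(completion)))  # '    return a + b\n'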
| 705 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = {"(", "[", "{"}
    closed_brackets = {")", "]", "}"}
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for ch in s:
        if ch in open_brackets:
            stack.append(ch)
        elif ch in closed_brackets and (
            len(stack) == 0 or open_to_closed[stack.pop()] != ch
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
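# Expected behaviour of `is_balanced` on a few sequences:
assert is_balanced("([]{})") is True
assert is_balanced("([)]") is False  # crossing pairs
assert is_balanced("(((") is False  # unclosed brackets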
| 456 | 0 |
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the substring divisibility properties of a 0-9 pandigital number."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all 0-9 pandigital numbers with the divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
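# Sanity check of the divisibility property on the example from the problem
# statement: 1406357289 is 0-9 pandigital and satisfies all seven conditions.
digits = tuple(int(c) for c in "1406357289")
assert is_substring_divisible(digits)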
| 27 |
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate intersection and union between a predicted and a ground truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersection and union over a list of segmentation maps."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute mean IoU, mean accuracy and overall accuracy over a list of segmentation maps."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
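# The core counting step on a tiny 2x2 case (num_labels=2, nothing ignored):
import numpy as np

pred = np.array([[0, 1], [1, 1]])
gt = np.array([[0, 1], [0, 1]])
area_intersect, area_union, area_pred, area_gt = intersect_and_union(
    pred, gt, num_labels=2, ignore_index=255
)
print(area_intersect)  # [1 2] -> one correct "0" pixel, two correct "1" pixels
print(area_union)  # [2 3]
print(area_intersect / area_union)  # per-category IoU: [0.5 0.66666667]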
| 165 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def SCREAMING_SNAKE_CASE ( ):
lowercase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
lowercase = np.load(lowercase_ )
return list(lowercase_ )
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> str:
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_video()
lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([0.3669, -0.0688, -0.2421] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_video()
lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# add boolean mask, indicating which patches to mask
lowercase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
lowercase = torch.load(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size([1, 1408, 1536] )
lowercase = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=_lowerCAmelCase )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase = torch.tensor([0.5142] , device=_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , _lowerCAmelCase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=_lowerCAmelCase ).to(
_lowerCAmelCase )
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
lowercase = torch.tensor(torch.tensor([0.6469] ) , device=_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , _lowerCAmelCase , atol=1E-4 ) )
| 653 |
"""Largest square of 1s in a binary matrix, solved four ways.

Note: despite the historical `area` naming, each function returns the *side
length* of the largest all-ones square.
"""


def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain top-down recursion (exponential time, kept for reference)."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Top-down recursion with memoization, O(rows * cols) subproblems."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up tabulation over a full (rows + 1) x (cols + 1) DP table.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimized(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up tabulation keeping only the current and next rows, O(cols) space."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        # Snapshot the row we just filled; a plain `next_row = current_row` would
        # alias the two buffers and corrupt the diagonal lookups on the next row.
        next_row = current_row[:]
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
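    # Cross-check (added for illustration): all four implementations above should
    # agree; for the matrix below the largest all-ones square has side length 3.
    example_matrix = [
        [1, 1, 1, 0],
        [1, 1, 1, 1],
        [1, 1, 1, 1],
        [0, 1, 1, 1],
    ]
    results = {
        solver(4, 4, example_matrix)
        for solver in (
            largest_square_area_in_matrix_top_down,
            largest_square_area_in_matrix_top_down_with_dp,
            largest_square_area_in_matrix_bottom_up,
            largest_square_area_in_matrix_bottom_up_space_optimized,
        )
    }
    print(results)  # expected: {3}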
| 653 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
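# Usage note (illustrative, not part of the module): with the `_LazyModule`
# indirection above, an import such as
# `from transformers.models.biogpt import BioGptModel` resolves the attribute
# lazily, so the torch-dependent modeling code is only imported on first access
# rather than at package import time.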
| 419 |
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Solve a*x**2 + b*x + c = 0; real roots are returned as plain floats."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
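# Worked examples (added for illustration; both follow from the formula above):
#   quadratic_roots(a=1, b=-3, c=2)  ->  (2.0, 1.0)   (discriminant 1, real roots)
#   quadratic_roots(a=1, b=0, c=1)   ->  (1j, -1j)    (discriminant -4, complex roots)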
| 419 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for the ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 714 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class lowerCamelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ = "▁" , UpperCAmelCase__ = True , UpperCAmelCase__ = "<unk>" , UpperCAmelCase__ = "</s>" , UpperCAmelCase__ = "<pad>" , ):
SCREAMING_SNAKE_CASE__ = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
SCREAMING_SNAKE_CASE__ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
SCREAMING_SNAKE_CASE__ = token_dict["token"]
SCREAMING_SNAKE_CASE__ = Tokenizer(Unigram() )
SCREAMING_SNAKE_CASE__ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}" ) , " " ),
normalizers.Lowercase(),
] )
SCREAMING_SNAKE_CASE__ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ ),
pre_tokenizers.Digits(individual_digits=UpperCAmelCase__ ),
pre_tokenizers.Punctuation(),
] )
SCREAMING_SNAKE_CASE__ = decoders.Metaspace(replacement=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = TemplateProcessing(
single=f'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
SCREAMING_SNAKE_CASE__ = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ = 8000 , UpperCAmelCase__ = True , ):
SCREAMING_SNAKE_CASE__ = trainers.UnigramTrainer(
vocab_size=UpperCAmelCase__ , special_tokens=self.special_tokens_list , show_progress=UpperCAmelCase__ , )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE__ = [files]
self._tokenizer.train(UpperCAmelCase__ , trainer=UpperCAmelCase__ )
self.add_unk_id()
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ = 8000 , UpperCAmelCase__ = True , ):
SCREAMING_SNAKE_CASE__ = trainers.UnigramTrainer(
vocab_size=UpperCAmelCase__ , special_tokens=self.special_tokens_list , show_progress=UpperCAmelCase__ , )
self._tokenizer.train_from_iterator(UpperCAmelCase__ , trainer=UpperCAmelCase__ )
self.add_unk_id()
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = json.loads(self._tokenizer.to_str() )
SCREAMING_SNAKE_CASE__ = self.special_tokens["unk"]["id"]
SCREAMING_SNAKE_CASE__ = Tokenizer.from_str(json.dumps(UpperCAmelCase__ ) )
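# A minimal usage sketch (not part of the module): train from an in-memory
# iterator. The corpus and vocab size are made up for illustration; a real
# corpus must be large enough to support the requested vocabulary size.
if __name__ == "__main__":
    corpus = ["hello world", "hello tokenizers", "unigram language models are neat"]
    sp_tokenizer = SentencePieceUnigramTokenizer()
    sp_tokenizer.train_from_iterator(corpus, vocab_size=40, show_progress=False)
    print(sp_tokenizer.encode("hello world").tokens)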
| 112 | 0 |
"""Project Euler 25: index of the first Fibonacci number with `n` digits."""
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers starting from F(2): 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
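# Worked example (added for illustration): the first Fibonacci number with three
# digits is F(12) = 144, so `solution(3)` returns 12.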
| 172 |
"""Utilities to split, merge, and shuffle `gen_kwargs` dicts for sharded dataset generation."""
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Lists of different sizes make sharding ambiguous, so raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Distribute `num_shards` over at most `max_num_jobs` contiguous index ranges."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into one gen_kwargs dict per job (at most `max_num_jobs`)."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Concatenate the shardable lists from a list of gen_kwargs dicts back into one dict."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle the shard lists in gen_kwargs; lists of the same length are shuffled the same way."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
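# Example (added for illustration): splitting generator kwargs over two jobs and
# merging the per-job dicts back together. Only lists are treated as shardable
# data sources; scalar values are copied to every job.
if __name__ == "__main__":
    gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt", "d.txt"], "encoding": "utf-8"}
    per_job = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
    print(per_job)  # [{'files': ['a.txt', 'b.txt'], ...}, {'files': ['c.txt', 'd.txt'], ...}]
    print(_merge_gen_kwargs(per_job) == gen_kwargs)  # True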
| 172 | 1 |
"""Project Euler 800: count hybrid integers p**q * q**p (p, q distinct primes) not exceeding base**degree."""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """
    Return the prime numbers below max_number via a sieve of Eratosthenes.

    >>> calculate_prime_numbers(10)
    [2, 3, 5, 7]
    """
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """
    Count hybrid integers p**q * q**p <= base**degree with a two-pointer scan
    over the primes, comparing q * log2(p) + p * log2(q) against degree * log2(base).
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
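# Brute-force cross-check (added for illustration, not part of the original
# solution): enumerate prime pairs (p, q) with p < q directly and apply the same
# log2 bound. For small parameters this can be used to validate the two-pointer
# scan in `solution` above.
def _brute_force_count(base: int, degree: int) -> int:
    upper_bound = degree * log2(base)
    primes = calculate_prime_numbers(int(upper_bound))
    return sum(
        1
        for i, p in enumerate(primes)
        for q in primes[i + 1 :]
        if q * log2(p) + p * log2(q) <= upper_bound
    )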
| 702 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 351 | 0 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict


@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [14, 447] * 10  # "the president" repeated ten times
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 575 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase : Optional[str] = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCamelCase : Optional[int] = field(
default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate fo training.'} )
lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate.'} )
lowerCamelCase : Optional[int] = field(
default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCamelCase : Optional[int] = field(
default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase : Optional[int] = field(
default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
default='eval_results.json' , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot-clean' , metadata={'help': 'Folder to save processed processed dataset.'} )
lowerCamelCase : Optional[int] = field(
default=10_00_00 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[float] = field(
default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase : Optional[int] = field(
default=3_27_68 , metadata={'help': 'Number of examples to train the tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
    lowerCamelCase : Optional[bool] = field(default=True , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
    lowerCamelCase : Optional[int] = field(default=None , metadata={'help': 'Number of workers used for pretokenization.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
    lowerCamelCase : Optional[bool] = field(default=True , metadata={'help': 'Push saved model to the hub.'} )
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["note_seq"]

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''note_seq'''] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''note_seq'''] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''note_seq'''] )
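
# Usage note (added for illustration): any attempt to instantiate this dummy
# class or to call `from_config`/`from_pretrained` on it raises an ImportError
# from `requires_backends` when the `note_seq` package is missing, e.g.:
#
#   >>> lowerCAmelCase__.from_pretrained("some/checkpoint")  # hypothetical id
#   ImportError: ... requires the note_seq library ...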
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = 'UperNetConfig'
class UperNetConvModule(nn.Module ):
    '''simple docstring'''

    def __init__( self , in_channels : int , out_channels : int , kernel_size : Union[int, Tuple[int, int]] , padding : Union[int, Tuple[int, int], str] = 0 , bias : bool = False , dilation : Union[int, Tuple[int, int]] = 1 , ):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , padding=padding , bias=bias , dilation=dilation , )
        self.batch_norm = nn.BatchNorm2d(out_channels )
        self.activation = nn.ReLU()

    def forward( self , input : torch.Tensor ):
        output = self.conv(input )
        output = self.batch_norm(output )
        output = self.activation(output )
        return output
class UperNetPyramidPoolingBlock(nn.Module ):
    '''simple docstring'''

    def __init__( self , pool_scale : int , in_channels : int , channels : int ):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale ),
            UperNetConvModule(in_channels , channels , kernel_size=1 ),
        ]
        for i, layer in enumerate(self.layers ):
            self.add_module(str(i ) , layer )

    def forward( self , input : torch.Tensor ):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module ):
    '''simple docstring'''

    def __init__( self , pool_scales : Tuple[int, ...] , in_channels : int , channels : int , align_corners : bool ):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales ):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale , in_channels=in_channels , channels=channels )
            self.blocks.append(block )
            self.add_module(str(i ) , block )
    def forward( self , x : torch.Tensor ):
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x )
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
            ppm_outs.append(upsampled_ppm_out )
        return ppm_outs
class UperNetHead(nn.Module ):
    '''simple docstring'''

    def __init__( self , config , in_channels ):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels , self.channels , kernel_size=1 )
            fpn_conv = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(l_conv )
            self.fpn_convs.append(fpn_conv )

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    def init_weights( self ):
        self.apply(self._init_weights )

    def _init_weights( self , module ):
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward( self , inputs ):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x ) )
        psp_outs = torch.cat(psp_outs , dim=1 )
        output = self.bottleneck(psp_outs )
        return output
    def forward( self , encoder_hidden_states : torch.Tensor ):
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]

        laterals.append(self.psp_forward(encoder_hidden_states ) )

        # build top-down path
        used_backbone_levels = len(laterals )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=prev_shape , mode='''bilinear''' , align_corners=self.align_corners )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )

        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
        fpn_outs = torch.cat(fpn_outs , dim=1 )
        output = self.fpn_bottleneck(fpn_outs )
        output = self.classifier(output )
        return output
class UperNetFCNHead(nn.Module ):
    '''simple docstring'''

    def __init__( self , config , in_index : int = 2 , kernel_size : int = 3 , dilation : Union[int, Tuple[int, int]] = 1 ):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs )
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=kernel_size , padding=kernel_size // 2 )

        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )

    def init_weights( self ):
        self.apply(self._init_weights )

    def _init_weights( self , module ):
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()

    def forward( self , encoder_hidden_states : torch.Tensor ):
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states )
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        output = self.classifier(output )
        return output
class UperNetPreTrainedModel(PreTrainedModel ):
    '''simple docstring'''
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights( self , module ):
        if isinstance(module , UperNetPreTrainedModel ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights( self ):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , BackboneMixin ):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_INPUTS_DOCSTRING = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , UPERNET_START_DOCSTRING , )
class UperNetForSemanticSegmentation(UperNetPreTrainedModel ):
'''simple docstring'''
    def __init__( self , config ):
        super().__init__(config )

        self.backbone = AutoBackbone.from_config(config.backbone_config )

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config , in_channels=self.backbone.channels )
        self.auxiliary_head = UperNetFCNHead(config ) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self , pixel_values : Optional[torch.Tensor] = None , output_attentions : Optional[bool] = None , output_hidden_states : Optional[bool] = None , labels : Optional[torch.Tensor] = None , return_dict : Optional[bool] = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
        features = outputs.feature_maps

        logits = self.decode_head(features )
        logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('''The number of labels should be greater than one''' )
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits , labels )
                auxiliary_loss = loss_fct(auxiliary_logits , labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
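
# Usage sketch (added for illustration; the checkpoint name mirrors the archive
# list at the top of this file):
#
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # (batch_size, num_labels, height, width)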
def solution( n : int = 1_000 ):
    product = -1
    candidate = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
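        # From a + b + c = N and a**2 + b**2 = c**2, substitute c = N - a - b and
        # solve for b, giving b = (N**2 - 2*a*N) / (2*N - 2*a); the line below
        # uses integer division since the triplet must be integral.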
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
if c * c == (a * a + b * b):
            candidate = a * b * c
if candidate >= product:
                product = candidate
return product
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) ->tuple[complex, complex]:
    if a == 0:
        raise ValueError('''Coefficient \'a\' must not be zero.''' )
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
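
# Worked check (added for illustration): for 5x^2 + 6x + 1 = 0 the discriminant
# is 6*6 - 4*5*1 = 16, so the roots are (-6 + 4) / 10 = -0.2 and
# (-6 - 4) / 10 = -1.0; both are real, hence returned as plain floats.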
def main() ->None:
    solution_1, solution_2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(F'''The solutions are: {solution_1} and {solution_2}''' )
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = F'''down_blocks.{i}.resnets.{j}.'''
        sd_down_res_prefix = F'''input_blocks.{3*i + j + 1}.0.'''
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = F'''down_blocks.{i}.attentions.{j}.'''
            sd_down_atn_prefix = F'''input_blocks.{3*i + j + 1}.1.'''
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = F'''up_blocks.{i}.resnets.{j}.'''
        sd_up_res_prefix = F'''output_blocks.{3*i + j}.0.'''
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = F'''up_blocks.{i}.attentions.{j}.'''
            sd_up_atn_prefix = F'''output_blocks.{3*i + j}.1.'''
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = F'''down_blocks.{i}.downsamplers.0.conv.'''
        sd_downsample_prefix = F'''input_blocks.{3*(i+1)}.0.op.'''
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = F'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = F'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = '''mid_block.attentions.0.'''
sd_mid_atn_prefix = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = F'''mid_block.resnets.{j}.'''
    sd_mid_res_prefix = F'''middle_block.{2*j}.'''
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict ):
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
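
# Note (added for illustration): each conversion map stores (sd_name, hf_name)
# pairs, and `convert_unet_state_dict` replaces the *HF Diffusers* substrings
# with their *Stable Diffusion* equivalents, so the resulting keys can be
# loaded by the original SD codebase.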
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = F'''encoder.down_blocks.{i}.resnets.{j}.'''
        sd_down_prefix = F'''encoder.down.{i}.block.{j}.'''
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = F'''down_blocks.{i}.downsamplers.0.'''
        sd_downsample_prefix = F'''down.{i}.downsample.'''
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = F'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = F'''up.{3-i}.upsample.'''
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = F'''decoder.up_blocks.{i}.resnets.{j}.'''
        sd_up_prefix = F'''decoder.up.{3-i}.block.{j}.'''
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = F'''mid_block.resnets.{i}.'''
    sd_mid_res_prefix = F'''mid.block_{i+1}.'''
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w ):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape , 1 , 1 )
def convert_vae_state_dict(vae_state_dict ):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if F'''mid.attn_1.{weight_name}.weight''' in k:
                print(F'''Reshaping {k} for SD format''' )
                new_state_dict[k] = reshape_weight_for_sd(v )
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'''q''': 0, '''k''': 1, '''v''': 2}
def convert_text_enc_state_dict_vaa(text_enc_dict ):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(""".self_attn.q_proj.weight""" )
            or k.endswith(""".self_attn.k_proj.weight""" )
            or k.endswith(""".self_attn.v_proj.weight""" )
        ):
            k_pre = k[: -len(""".q_proj.weight""" )]
            k_code = k[-len("""q_proj.weight""" )]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(""".self_attn.q_proj.bias""" )
            or k.endswith(""".self_attn.k_proj.bias""" )
            or k.endswith(""".self_attn.v_proj.bias""" )
        ):
            k_pre = k[: -len(""".q_proj.bias""" )]
            k_code = k[-len("""q_proj.bias""" )]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + """.in_proj_weight"""] = torch.cat(tensors )

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + """.in_proj_bias"""] = torch.cat(tensors )
    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict ):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
    vae_path = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
    text_enc_path = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
# Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="""cpu""")
    else:
        unet_path = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
        unet_state_dict = torch.load(unet_path, map_location="""cpu""")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="""cpu""")
    else:
        vae_path = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
        vae_state_dict = torch.load(vae_path, map_location="""cpu""")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="""cpu""")
    else:
        text_enc_path = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
        text_enc_dict = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
        state_dict = {"""state_dict""": state_dict}
torch.save(state_dict, args.checkpoint_path)
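
    # Example invocation (added for illustration; script name and paths are
    # placeholders):
    #
    #   python convert_to_sd.py --model_path ./my-diffusers-model \
    #       --checkpoint_path ./model.ckpt --half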
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler ):

    def _parse_labels( self , labels ):
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split(""",""" ) if label.strip()]
        return labels
    def __call__( self , sequences , labels , hypothesis_template ):
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError("""You must include at least one label and at least one sequence.""" )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    """The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
                    """Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
                ).format(hypothesis_template ) )

        if isinstance(sequences , str ):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline(ChunkPipeline ):

    def __init__( self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs ):
        self._args_parser = args_parser
        super().__init__(*args , **kwargs )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
    def entailment_id( self ):
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
    def _parse_and_tokenize( self , sequence_pairs , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                """Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
                """ `pad_token=eos_token`""" )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=truncation , )
        except Exception as e:
            if "too short" in str(e ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs
    def _sanitize_parameters( self , **kwargs ):
        if kwargs.get("""multi_class""" , None ) is not None:
            kwargs["""multi_label"""] = kwargs["""multi_class"""]
            logger.warning(
                """The `multi_class` argument has been deprecated and renamed to `multi_label`. """
                """`multi_class` will be removed in a future version of Transformers.""" )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["""candidate_labels"""] = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
        if "hypothesis_template" in kwargs:
            preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["""multi_label"""] = kwargs["""multi_label"""]
        return preprocess_params, {}, postprocess_params
    def __call__( self , sequences , *args , **kwargs ):
        if len(args ) == 0:
            pass
        elif len(args ) == 1 and "candidate_labels" not in kwargs:
            kwargs["""candidate_labels"""] = args[0]
        else:
            raise ValueError(f'''Unable to understand extra arguments {args}''' )

        return super().__call__(sequences , **kwargs )
    def preprocess( self , inputs , candidate_labels=None , hypothesis_template="This example is {}." ):
        sequence_pairs , sequences = self._args_parser(inputs , candidate_labels , hypothesis_template )

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels , sequence_pairs ) ):
            model_input = self._parse_and_tokenize([sequence_pair] )

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels ) - 1,
                **model_input,
            }
    def _forward( self , inputs ):
        candidate_label = inputs["""candidate_label"""]
        sequence = inputs["""sequence"""]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs )

        model_outputs = {
            """candidate_label""": candidate_label,
            """sequence""": sequence,
            """is_last""": inputs["""is_last"""],
            **outputs,
        }
        return model_outputs
    def postprocess( self , model_outputs , multi_label=False ):
        candidate_labels = [outputs["""candidate_label"""] for outputs in model_outputs]
        sequences = [outputs["""sequence"""] for outputs in model_outputs]
        logits = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )

        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 , keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 , keepdims=True )

        top_inds = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
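
# Usage sketch (added for illustration; the checkpoint is one commonly used NLI
# model and is an assumption, any model exposing an "entailment" label works):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("I love this movie", candidate_labels=["positive", "negative"])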
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
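    # Note (added for illustration): the `_LazyModule` registered above defers
    # the heavy torch-dependent imports until an attribute such as
    # `JukeboxModel` is first accessed, keeping `import transformers` cheap
    # when torch is not needed.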
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = tempfile.mkdtemp()
_A = BlipImageProcessor()
        _A = GPT2Tokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
_A = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
_A = InstructBlipProcessor(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).tokenizer
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).image_processor
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).qformer_tokenizer
def UpperCAmelCase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> Union[str, Any]:
        _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
_A = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self ) -> List[Any]:
_A = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_A = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_A = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )
_A = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
self.assertIsInstance(processor.qformer_tokenizer , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = self.prepare_image_inputs()
_A = image_processor(lowerCAmelCase_ , return_tensors="""np""" )
_A = processor(images=lowerCAmelCase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = """lower newer"""
_A = processor(text=lowerCAmelCase_ )
_A = tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_A = qformer_tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def UpperCAmelCase ( self ) -> Any:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = """lower newer"""
_A = self.prepare_image_inputs()
_A = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def UpperCAmelCase ( self ) -> Dict:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A = processor.batch_decode(lowerCAmelCase_ )
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = """lower newer"""
_A = self.prepare_image_inputs()
_A = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
def hubble_parameter( hubble_constant : float , radiation_density : float , matter_density : float , dark_energy : float , redshift : float , ):
    """simple docstring"""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("All input parameters must be positive" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("Relative densities cannot be greater than one" )
else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
        hubble = hubble_constant * e_a ** (1 / 2)
return hubble
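
# The expression above is the Friedmann equation written as H(z) = H0 * E(z),
# with E(z)^2 = Omega_r*(1+z)^4 + Omega_m*(1+z)^3 + Omega_k*(1+z)^2 +
# Omega_Lambda, where the curvature term Omega_k is fixed by requiring all
# densities to sum to one.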
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin ):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None
    @property
    def feat_extract_dict( self ):
        return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase_ ( self :Any ) -> str:
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCamelCase , "feature_size" ) )
self.assertTrue(hasattr(lowerCamelCase , "sampling_rate" ) )
self.assertTrue(hasattr(lowerCamelCase , "padding_value" ) )
def UpperCAmelCase_ ( self :str ) -> int:
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCamelCase ) == len(lowerCamelCase ) for x, y in zip(lowerCamelCase , processed_features[input_name] ) ) )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase )
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
UpperCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def UpperCAmelCase_ ( self :Dict ) -> Union[str, Any]:
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase )
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
UpperCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def UpperCAmelCase_ ( self :Tuple ) -> Dict:
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase )
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="tf" )
UpperCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    def _check_padding( self , lowerCamelCase :int=False ) -> str:
def _inputs_have_equal_length(lowerCamelCase :Union[str, Any] ):
UpperCAmelCase__ = len(input[0] )
for input_slice in input[1:]:
if len(lowerCamelCase ) != length:
return False
return True
def _inputs_are_equal(lowerCamelCase :Dict , lowerCamelCase :Optional[Any] ):
if len(lowerCamelCase ) != len(lowerCamelCase ):
return False
for input_slice_a, input_slice_a in zip(lowerCamelCase , lowerCamelCase ):
if not np.allclose(np.asarray(lowerCamelCase ) , np.asarray(lowerCamelCase ) , atol=1e-3 ):
return False
return True
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCamelCase )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = self.feat_extract_tester.seq_length_diff
UpperCAmelCase__ = self.feat_extract_tester.max_seq_length + pad_diff
UpperCAmelCase__ = self.feat_extract_tester.min_seq_length
UpperCAmelCase__ = self.feat_extract_tester.batch_size
UpperCAmelCase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[-1] ) )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="max_length" )[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , pad_to_multiple_of=10 )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , pad_to_multiple_of=10 )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=lowerCamelCase , return_tensors="np" , )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(all(len(lowerCamelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCamelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
UpperCAmelCase__ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
    def _check_truncation( self , lowerCamelCase :int=False ) -> str:
def _inputs_have_equal_length(lowerCamelCase :Any ):
UpperCAmelCase__ = len(input[0] )
for input_slice in input[1:]:
if len(lowerCamelCase ) != length:
return False
return True
def _inputs_are_equal(lowerCamelCase :Optional[int] , lowerCamelCase :str ):
if len(lowerCamelCase ) != len(lowerCamelCase ):
return False
for input_slice_a, input_slice_a in zip(lowerCamelCase , lowerCamelCase ):
if not np.allclose(np.asarray(lowerCamelCase ) , np.asarray(lowerCamelCase ) , atol=1e-3 ):
return False
return True
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCamelCase )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
# truncate to smallest with np
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=lowerCamelCase , )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
# truncate to middle
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=lowerCamelCase , return_tensors="np" , )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , truncation=lowerCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="longest" , truncation=lowerCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="longest" , truncation=lowerCamelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="max_length" , truncation=lowerCamelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
UpperCAmelCase__ = 12
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCamelCase , )
UpperCAmelCase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
UpperCAmelCase__ = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
UpperCAmelCase__ = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
def UpperCAmelCase_ ( self :int ) -> List[str]:
self._check_padding(numpify=lowerCamelCase )
def UpperCAmelCase_ ( self :List[Any] ) -> int:
self._check_padding(numpify=lowerCamelCase )
def UpperCAmelCase_ ( self :str ) -> str:
self._check_truncation(numpify=lowerCamelCase )
def UpperCAmelCase_ ( self :Dict ) -> str:
self._check_truncation(numpify=lowerCamelCase )
@require_torch
def UpperCAmelCase_ ( self :int ) -> Any:
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="pt" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1e-2 )
@require_tf
def UpperCAmelCase_ ( self :List[Any] ) -> Optional[Any]:
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="tf" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1e-2 )
def UpperCAmelCase_ ( self :List[str] ) -> str:
UpperCAmelCase__ = self.feat_extract_dict
UpperCAmelCase__ = True
UpperCAmelCase__ = self.feature_extraction_class(**lowerCamelCase )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = [len(lowerCamelCase ) for x in speech_inputs]
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCamelCase )
def UpperCAmelCase_ ( self :int ) -> int:
UpperCAmelCase__ = self.feat_extract_dict
UpperCAmelCase__ = True
UpperCAmelCase__ = self.feature_extraction_class(**lowerCamelCase )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = [len(lowerCamelCase ) for x in speech_inputs]
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = min(lowerCamelCase )
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , truncation=lowerCamelCase , return_tensors="np" )
self.assertIn("attention_mask" , lowerCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
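
# Usage note (added for illustration): concrete feature-extractor tests are
# expected to subclass this mixin together with `unittest.TestCase` and to set
# the `feature_extraction_class` and `feat_extract_tester` attributes declared
# at the top before the shared padding/truncation tests can run.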
'''simple docstring'''
from math import isqrt, log2
def calculate_prime_numbers( max_number ):
    """simple docstring"""
    is_prime = [True] * max_number

    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False

    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution( base = 80_08_00 , degree = 80_08_00 ):
    """simple docstring"""
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 627 |
""" Testing suite for the PyTorch MobileNetV1 model. """


import inspect
import unittest

from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV1 does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
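
# A minimal standalone usage sketch of the model exercised by the integration
# test above (the checkpoint name comes from the test; the COCO image URL is an
# illustrative assumption):
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import MobileNetV1ForImageClassification, MobileNetV1ImageProcessor

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    processor = MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

    inputs = processor(images=image, return_tensors="pt")
    logits = model(**inputs).logits
    # index 0 is an extra "background" class, hence the 1001 logits checked above
    print(model.config.id2label[logits.argmax(-1).item()])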
| 627 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
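
# A minimal sketch of the RoPE-scaling mechanism exercised by the parameterized
# test above: the scaling dict is attached to the config before the model is
# built (the tiny sizes and factor value are illustrative assumptions).
if __name__ == "__main__":
    config = GPTNeoXConfig(vocab_size=99, hidden_size=64, num_attention_heads=8, num_hidden_layers=2)
    config.rope_scaling = {"type": "linear", "factor": 10.0}  # or {"type": "dynamic", "factor": 10.0}
    model = GPTNeoXModel(config)
    # RoPE frequencies are rescaled, so inputs longer than the trained context stay in range
    longer_than_trained = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
    print(model(longer_than_trained).last_hidden_state.shape)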
| 717 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = analyze_text(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
lowerCamelCase__ : List[Any] = sum(single_char_strings.values() )
# one length string
lowerCamelCase__ : str = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase__ : Tuple = single_char_strings[ch]
lowerCamelCase__ : Union[str, Any] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowerCamelCase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
lowerCamelCase__ : Dict = sum(two_char_strings.values() )
lowerCamelCase__ : str = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
lowerCamelCase__ : int = cha + cha
if sequence in two_char_strings:
lowerCamelCase__ : int = two_char_strings[sequence]
lowerCamelCase__ : Tuple = int(_lowerCamelCase ) / all_sum
my_sec_sum += prob * math.loga(_lowerCamelCase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = Counter() # type: ignore
lowerCamelCase__ : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowerCamelCase_ ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
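
# Example usage (the sample string is my own illustration):
#
#     single, pairs = analyze_text("abracadabra")
#     print(single.most_common(3))   # [('a', 5), ('b', 2), ('r', 2)]
#     calculate_prob("abracadabra")  # prints H(X), H(X, X') and H(X'|X) in bits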
| 696 | 0 |
""" OpenAI ImageGPT configuration"""

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for start of sentence (sos) token
        n_positions=32 * 32,  # [ 32x32 pixels ]
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Generate dummy pixel inputs to provide to the ONNX exporter."""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
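
# A minimal usage sketch of the defaults defined above:
if __name__ == "__main__":
    config = ImageGPTConfig()  # 513-token vocab: 512 pixel color clusters + 1 SOS token
    # `attribute_map` lets the GPT-2 style names be read through the common API:
    print(config.n_positions, config.hidden_size)  # 1024 (32*32 pixels), 512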
| 467 |
"""Tests for the MGP-STR processor."""

import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a single PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        # one logits tensor per recognition head: character, BPE, and WordPiece
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
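
# A minimal end-to-end sketch of the processor on a published checkpoint
# ("alibaba-damo/mgp-str-base" is the MGP-STR model on the Hub; the random
# stand-in image is my own illustration — use a real text crop in practice):
if __name__ == "__main__":
    from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

    processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

    image = Image.fromarray(np.random.randint(255, size=(32, 128, 3), dtype=np.uint8))
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    outputs = model(pixel_values)
    # batch_decode fuses the char/BPE/WordPiece head logits into final strings
    print(processor.batch_decode(outputs.logits)["generated_text"])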
| 467 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = """▁"""
_SCREAMING_SNAKE_CASE = {"""vocab_file""": """spiece.model"""}
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
_SCREAMING_SNAKE_CASE = {
"""google/pegasus-xsum""": 5_1_2,
}
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__(self , lowerCAmelCase__ , lowerCAmelCase__="<pad>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<mask_2>" , lowerCAmelCase__="<mask_1>" , lowerCAmelCase__=None , lowerCAmelCase__=1_03 , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
'''simple docstring'''
_UpperCamelCase : List[str] = offset
if additional_special_tokens is not None:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError(
F"additional_special_tokens should be of type {type(lowerCAmelCase__ )}, but is"
F" {type(lowerCAmelCase__ )}" )
_UpperCamelCase : Any = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"<unk_{i}>" for i in range(len(lowerCAmelCase__ ) , self.offset - 1 )
]
if len(set(lowerCAmelCase__ ) ) != len(lowerCAmelCase__ ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
_UpperCamelCase : Dict = additional_special_tokens_extended
else:
_UpperCamelCase : Dict = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset )]
_UpperCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token_sent=lowerCAmelCase__ , offset=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
_UpperCamelCase : Dict = mask_token_sent
_UpperCamelCase : List[str] = vocab_file
_UpperCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase__ )
# add special tokens to encoder dict
_UpperCamelCase : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
_UpperCamelCase : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def lowercase_ (self ):
'''simple docstring'''
return len(self.sp_model ) + self.offset
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : str = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.__dict__.copy()
_UpperCamelCase : Optional[int] = None
return state
def __setstate__(self , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Tuple = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCamelCase : Any = {}
_UpperCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ (self , lowerCAmelCase__ ):
'''simple docstring'''
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def lowercase_ (self , lowerCAmelCase__ ):
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
_UpperCamelCase : Union[str, Any] = self.sp_model.piece_to_id(lowerCAmelCase__ )
return sp_id + self.offset
def lowercase_ (self , lowerCAmelCase__ ):
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
_UpperCamelCase : Tuple = self.sp_model.IdToPiece(index - self.offset )
return token
def lowercase_ (self , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : int = []
_UpperCamelCase : Dict = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
_UpperCamelCase : Tuple = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def lowercase_ (self , lowerCAmelCase__=False ):
'''simple docstring'''
return 1
def lowercase_ (self , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(lowerCAmelCase__ )
elif token_ids_a is None:
return self._special_token_mask(lowerCAmelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_UpperCamelCase : List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , "wb" ) as fi:
_UpperCamelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
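
# A minimal usage sketch of the tokenizer defined above, loading the published
# checkpoint referenced in PRETRAINED_VOCAB_FILES_MAP:
if __name__ == "__main__":
    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    ids = tokenizer("Studies have shown that owning a dog is good for you.").input_ids
    # a single EOS (id 1, per the encoder dict above) is appended; there is no BOS token
    print(ids[-1], tokenizer.decode(ids, skip_special_tokens=True))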
| 239 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase = DDIMPipeline
__UpperCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
__UpperCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
__UpperCAmelCase = False
def lowercase_ (self ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : Union[str, Any] = {"unet": unet, "scheduler": scheduler}
return components
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__=0 ):
'''simple docstring'''
if str(lowerCAmelCase__ ).startswith("mps" ):
_UpperCamelCase : List[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
_UpperCamelCase : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCamelCase : Union[str, Any] = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : int = "cpu"
_UpperCamelCase : Optional[int] = self.get_dummy_components()
_UpperCamelCase : Union[str, Any] = self.pipeline_class(**lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = pipe(**lowerCAmelCase__ ).images
_UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
_UpperCamelCase : str = np.array(
[1.0_00E00, 5.7_17E-01, 4.7_17E-01, 1.0_00E00, 0.0_00E00, 1.0_00E00, 3.0_00E-04, 0.0_00E00, 9.0_00E-04] )
_UpperCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase__ , 1E-3 )
def lowercase_ (self ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def lowercase_ (self ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3E-3 )
def lowercase_ (self ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def lowercase_ (self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Dict = "google/ddpm-cifar10-32"
_UpperCamelCase : List[Any] = UNetaDModel.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase : Tuple = DDIMScheduler()
_UpperCamelCase : int = DDIMPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
ddim.to(lowerCAmelCase__ )
ddim.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : List[Any] = torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = ddim(generator=lowerCAmelCase__ , eta=0.0 , output_type="numpy" ).images
_UpperCamelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : int = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Dict = "google/ddpm-ema-bedroom-256"
_UpperCamelCase : Tuple = UNetaDModel.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = DDIMScheduler.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase : Dict = DDIMPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
ddpm.to(lowerCAmelCase__ )
ddpm.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = torch.manual_seed(0 )
_UpperCamelCase : Tuple = ddpm(generator=lowerCAmelCase__ , output_type="numpy" ).images
_UpperCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_UpperCamelCase : Dict = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
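
# A minimal standalone sampling sketch mirroring the CIFAR-10 integration test
# above (same checkpoint; the step count and output filename are illustrative):
if __name__ == "__main__":
    unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
    pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
    # eta=0.0 makes sampling deterministic given the generator seed
    image = pipe(generator=torch.manual_seed(0), eta=0.0, num_inference_steps=50).images[0]
    image.save("ddim_cifar10.png")  # a 32x32 PIL image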
| 239 | 1 |