Dataset schema (five columns per row; the bare numbers interleaved between the
code samples below are the per-row values of the integer columns):

    code                     string   length 82 to 54.1k characters
    code_codestyle           int64    0 to 699
    style_context            string   length 111 to 35.6k characters
    style_context_codestyle  int64    0 to 699
    label                    int64    0 or 1
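For orientation, a minimal sketch of reading rows with this schema through the datasets library; the dataset identifier below is a placeholder, not something stated on this page:

from datasets import load_dataset

# Hypothetical dataset id; substitute the real one for this dump.
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["label"], row["code_codestyle"], row["style_context_codestyle"])
print(len(row["code"]), len(row["style_context"]))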
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() A__: int = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ) -> int: # initialize config if "resnet-50" in model_name: _a : List[Any] =ResNetConfig.from_pretrained("""microsoft/resnet-50""" ) elif "resnet-101" in model_name: _a : Any =ResNetConfig.from_pretrained("""microsoft/resnet-101""" ) else: raise ValueError("""Model name should include either resnet50 or resnet101""" ) _a : Dict =DetrConfig(use_timm_backbone=_UpperCAmelCase ,backbone_config=_UpperCAmelCase ) # set label attributes _a : Dict ="""panoptic""" in model_name if is_panoptic: _a : str =250 else: _a : Optional[int] =91 _a : Union[str, Any] ="""huggingface/label-files""" _a : List[Any] ="""coco-detection-id2label.json""" _a : Tuple =json.load(open(hf_hub_download(_UpperCAmelCase ,_UpperCAmelCase ,repo_type="""dataset""" ) ,"""r""" ) ) _a : List[str] ={int(_UpperCAmelCase ): v for k, v in idalabel.items()} _a : Any =idalabel _a : Optional[Any] ={v: k for k, v in idalabel.items()} return config, is_panoptic def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int: # here we list all keys to be renamed (original name on the left, our name on the right) _a : str =[] # stem # fmt: off rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") ) rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") ) rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") ) rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") ) rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var", ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 
1}.{layer_idx}.conv{i+1}.weight", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var", ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( (F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias") ) rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias") ) rename_keys.append( (F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias") ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( (F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", F"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", F"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias") ) rename_keys.append( (F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (F"transformer.decoder.layers.{i}.norm1.bias", 
F"decoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append( (F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) return rename_keys def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : int ,_UpperCAmelCase : Dict ) -> str: _a : Optional[int] =state_dict.pop(_UpperCAmelCase ) _a : List[str] =val def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : Any=False ) -> Optional[Any]: _a : Optional[Any] ="""""" if is_panoptic: _a : Union[str, Any] ="""detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _a : List[Any] =state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) _a : Tuple =state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict _a : int =in_proj_weight[:256, :] _a : Tuple =in_proj_bias[:256] _a : Union[str, Any] =in_proj_weight[256:512, :] _a : List[Any] =in_proj_bias[256:512] _a : Dict =in_proj_weight[-256:, :] _a : Optional[int] =in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _a : Optional[int] =state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" ) _a : List[str] =state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict _a : str =in_proj_weight[:256, :] _a : List[str] =in_proj_bias[:256] _a : List[Any] =in_proj_weight[256:512, :] _a : Optional[Any] =in_proj_bias[256:512] _a : Union[str, Any] =in_proj_weight[-256:, :] _a : Optional[Any] =in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention _a : Optional[Any] =state_dict.pop( F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" ) _a : Union[str, Any] 
=state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" ) # next, add query, keys and values (in that order) of cross-attention to the state dict _a : List[Any] =in_proj_weight_cross_attn[:256, :] _a : int =in_proj_bias_cross_attn[:256] _a : Optional[Any] =in_proj_weight_cross_attn[256:512, :] _a : List[str] =in_proj_bias_cross_attn[256:512] _a : List[str] =in_proj_weight_cross_attn[-256:, :] _a : Optional[int] =in_proj_bias_cross_attn[-256:] def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]: _a : Optional[Any] ="""http://images.cocodataset.org/val2017/000000039769.jpg""" _a : str =Image.open(requests.get(_UpperCAmelCase ,stream=_UpperCAmelCase ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[str] ,_UpperCAmelCase : Tuple=None ,_UpperCAmelCase : Union[str, Any]=False ) -> Optional[Any]: _a , _a : Optional[int] =get_detr_config(_UpperCAmelCase ) # load original model from torch hub _a : List[str] ={ """detr-resnet-50""": """detr_resnet50""", """detr-resnet-101""": """detr_resnet101""", } logger.info(F"Converting model {model_name}..." ) _a : Tuple =torch.hub.load("""facebookresearch/detr""" ,model_name_to_original_name[model_name] ,pretrained=_UpperCAmelCase ).eval() _a : List[Any] =detr.state_dict() # rename keys for src, dest in create_rename_keys(_UpperCAmelCase ): if is_panoptic: _a : Tuple ="""detr.""" + src rename_key(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) # query, key and value matrices need special treatment read_in_q_k_v(_UpperCAmelCase ,is_panoptic=_UpperCAmelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _a : List[str] ="""detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): _a : List[str] =state_dict.pop(_UpperCAmelCase ) _a : int =val elif "class_labels_classifier" in key or "bbox_predictor" in key: _a : Dict =state_dict.pop(_UpperCAmelCase ) _a : int =val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: _a : Tuple =state_dict.pop(_UpperCAmelCase ) _a : Optional[Any] =val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): _a : Any =state_dict.pop(_UpperCAmelCase ) _a : Optional[Any] =val # finally, create HuggingFace model and load state dict _a : List[str] =DetrForSegmentation(_UpperCAmelCase ) if is_panoptic else DetrForObjectDetection(_UpperCAmelCase ) model.load_state_dict(_UpperCAmelCase ) model.eval() # verify our conversion on an image _a : Tuple ="""coco_panoptic""" if is_panoptic else """coco_detection""" _a : Optional[Any] =DetrImageProcessor(format=_UpperCAmelCase ) _a : Any =processor(images=prepare_img() ,return_tensors="""pt""" ) _a : int =encoding["""pixel_values"""] _a : Union[str, Any] =detr(_UpperCAmelCase ) _a : Dict =model(_UpperCAmelCase ) assert torch.allclose(outputs.logits ,original_outputs["""pred_logits"""] ,atol=1e-3 ) assert torch.allclose(outputs.pred_boxes ,original_outputs["""pred_boxes"""] ,atol=1e-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks ,original_outputs["""pred_masks"""] ,atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." 
) Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) processor.save_pretrained(_UpperCAmelCase ) if push_to_hub: # Upload model and image processor to the hub logger.info("""Uploading PyTorch model and image processor to the hub...""" ) model.push_to_hub(F"nielsr/{model_name}" ) processor.push_to_hub(F"nielsr/{model_name}" ) if __name__ == "__main__": A__: int = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''detr-resnet-50''', type=str, choices=['''detr-resnet-50''', '''detr-resnet-101'''], help='''Name of the DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''') A__: Tuple = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
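A converted checkpoint produced by the script above can be loaded back through the standard transformers API; a minimal sketch, assuming the model was dumped to ./detr-resnet-50 (a placeholder matching whatever --pytorch_dump_folder_path was used):

import requests
import torch
from PIL import Image
from transformers import DetrForObjectDetection, DetrImageProcessor

# The local directory is an assumption; any folder written by the script works.
processor = DetrImageProcessor.from_pretrained("./detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("./detr-resnet-50")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.logits.shape)  # (batch_size, num_queries, num_labels + 1)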
694
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
694
1
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version A__: str = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''') A__: List[str] = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) A__: Optional[int] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> List[str]: with open(_UpperCAmelCase ,"""rb""" ) as f: _a : Optional[int] =Image.open(_UpperCAmelCase ) return im.convert("""RGB""" ) @dataclass class A__ : __UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={ "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)." } , ) __UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) __UpperCamelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the training data."} ) __UpperCamelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the validation data."} ) __UpperCamelCase : Optional[float] = field( default=0.15 , metadata={"help": "Percent to split off of train for validation."} ) __UpperCamelCase : Optional[int] = field( default=UpperCAmelCase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) __UpperCamelCase : Optional[int] = field( default=UpperCAmelCase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
) } , ) def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]: '''simple docstring''' if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( """You must specify either a dataset name from the hub or a train and/or validation directory.""" ) @dataclass class A__ : __UpperCamelCase : str = field( default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , ) __UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCAmelCase__ )} , ) __UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) __UpperCamelCase : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) __UpperCamelCase : str = field(default=UpperCAmelCase__ , metadata={"help": "Name or path of preprocessor config."} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[str] ) -> Union[str, Any]: _a : Tuple =torch.stack([example["""pixel_values"""] for example in examples] ) _a : Tuple =torch.tensor([example["""labels"""] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _a : str =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _a , _a , _a : Optional[int] =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _a , _a , _a : str =parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_image_classification""" ,_UpperCAmelCase ,_UpperCAmelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _a : str =training_args.get_process_log_level() logger.setLevel(_UpperCAmelCase ) transformers.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _a : int =None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _a : List[str] =get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. if data_args.dataset_name is not None: _a : Tuple =load_dataset( data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir ,task="""image-classification""" ,use_auth_token=True if model_args.use_auth_token else None ,) else: _a : Optional[Any] ={} if data_args.train_dir is not None: _a : Tuple =os.path.join(data_args.train_dir ,"""**""" ) if data_args.validation_dir is not None: _a : Optional[int] =os.path.join(data_args.validation_dir ,"""**""" ) _a : Dict =load_dataset( """imagefolder""" ,data_files=_UpperCAmelCase ,cache_dir=model_args.cache_dir ,task="""image-classification""" ,) # If we don't have a validation split, split off a percentage of train as validation. _a : Optional[int] =None if """validation""" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split ,_UpperCAmelCase ) and data_args.train_val_split > 0.0: _a : int =dataset["""train"""].train_test_split(data_args.train_val_split ) _a : Dict =split["""train"""] _a : int =split["""test"""] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _a : Tuple =dataset["""train"""].features["""labels"""].names _a , _a : Tuple ={}, {} for i, label in enumerate(_UpperCAmelCase ): _a : int =str(_UpperCAmelCase ) _a : Optional[int] =label # Load the accuracy metric from the datasets package _a : Optional[int] =evaluate.load("""accuracy""" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(_UpperCAmelCase : Dict ): return metric.compute(predictions=np.argmax(p.predictions ,axis=1 ) ,references=p.label_ids ) _a : Any =AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path ,num_labels=len(_UpperCAmelCase ) ,labelaid=_UpperCAmelCase ,idalabel=_UpperCAmelCase ,finetuning_task="""image-classification""" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,) _a : int =AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=_UpperCAmelCase ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,) _a : Optional[int] =AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,) # Define torchvision transforms to be applied to each image. if "shortest_edge" in image_processor.size: _a : Optional[int] =image_processor.size["""shortest_edge"""] else: _a : str =(image_processor.size["""height"""], image_processor.size["""width"""]) _a : str =Normalize(mean=image_processor.image_mean ,std=image_processor.image_std ) _a : Tuple =Compose( [ RandomResizedCrop(_UpperCAmelCase ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) _a : Tuple =Compose( [ Resize(_UpperCAmelCase ), CenterCrop(_UpperCAmelCase ), ToTensor(), normalize, ] ) def train_transforms(_UpperCAmelCase : Any ): _a : Optional[int] =[ _train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""] ] return example_batch def val_transforms(_UpperCAmelCase : Tuple ): _a : Tuple =[_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: _a : List[str] =( dataset["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(_UpperCAmelCase ) if training_args.do_eval: if "validation" not in dataset: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: _a : int =( dataset["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(_UpperCAmelCase ) # Initalize our trainer _a : Optional[Any] =Trainer( model=_UpperCAmelCase ,args=_UpperCAmelCase ,train_dataset=dataset["""train"""] if training_args.do_train else None ,eval_dataset=dataset["""validation"""] if training_args.do_eval else None ,compute_metrics=_UpperCAmelCase ,tokenizer=_UpperCAmelCase ,data_collator=_UpperCAmelCase ,) # Training if training_args.do_train: _a : Optional[Any] =None if training_args.resume_from_checkpoint is not None: _a : str =training_args.resume_from_checkpoint elif last_checkpoint is not None: _a : List[str] =last_checkpoint _a : int =trainer.train(resume_from_checkpoint=_UpperCAmelCase ) trainer.save_model() trainer.log_metrics("""train""" ,train_result.metrics ) trainer.save_metrics("""train""" ,train_result.metrics ) trainer.save_state() # Evaluation if 
training_args.do_eval: _a : Dict =trainer.evaluate() trainer.log_metrics("""eval""" ,_UpperCAmelCase ) trainer.save_metrics("""eval""" ,_UpperCAmelCase ) # Write model card and (optionally) push to hub _a : List[Any] ={ """finetuned_from""": model_args.model_name_or_path, """tasks""": """image-classification""", """dataset""": data_args.dataset_name, """tags""": ["""image-classification""", """vision"""], } if training_args.push_to_hub: trainer.push_to_hub(**_UpperCAmelCase ) else: trainer.create_model_card(**_UpperCAmelCase ) if __name__ == "__main__": main()
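For reference, an illustrative launch of the fine-tuning script above; the script name, dataset, and output directory are placeholders, and the flags are standard TrainingArguments options:

import subprocess

# Hypothetical invocation; adjust paths and hyperparameters as needed.
subprocess.run(
    [
        "python", "run_image_classification.py",
        "--dataset_name", "beans",
        "--output_dir", "./vit-base-beans",
        "--do_train", "--do_eval",
        "--num_train_epochs", "5",
        "--per_device_train_batch_size", "8",
    ],
    check=True,
)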
694
"""Solve a system of n linear equations, given as n rows of n + 1 coefficients
(the last entry of each row is the constant term), via Gaussian elimination."""


def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by its leading coefficient so the first column becomes 1
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        first_line = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, first_line)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # Make sure the first equation has no zero coefficients
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitute from the last (shortest) reduced row upwards
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
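A small worked check for the solver above: the system x + 2y = 3, 4x + 5y = 6 has the unique solution x = -1, y = 2.

# Subtracting 4 times the first equation from the second gives -3y = -6,
# so y = 2 and x = 3 - 2y = -1.
print(solve_simultaneous([[1, 2, 3], [4, 5, 6]]))  # [-1.0, 2.0]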
694
1
"""Subset-sum check via dynamic programming over a boolean table."""


def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    # subset[i][j] is True when some subset of the first i elements sums to j
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
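A quick usage sketch for the subset-sum check above:

# [2, 4, 6, 8] contains only even numbers, so no subset can reach an odd sum.
print(is_sum_subset([2, 4, 6, 8], 5))   # False
print(is_sum_subset([2, 4, 6, 8], 14))  # True: 2 + 4 + 8 == 14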
694
"""MarkupLM model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
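A minimal sketch of the configuration in use; the printed values follow directly from the defaults above:

# Instantiate with defaults and inspect the xpath-specific fields.
config = MarkupLMConfig()
print(config.model_type)              # "markuplm"
print(config.max_depth)               # 50
print(config.xpath_unit_hidden_size)  # 32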
694
1
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class A__ : def __init__( self :Any , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :List[Any]=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Union[str, Any]=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :List[Any]=True , SCREAMING_SNAKE_CASE :str=True , SCREAMING_SNAKE_CASE :Tuple=9_9 , SCREAMING_SNAKE_CASE :Tuple=6_4 , SCREAMING_SNAKE_CASE :Dict=5 , SCREAMING_SNAKE_CASE :str=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Dict="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[Any]=0.1 , SCREAMING_SNAKE_CASE :Union[str, Any]=5_1_2 , SCREAMING_SNAKE_CASE :List[str]=1_6 , SCREAMING_SNAKE_CASE :List[str]=2 , SCREAMING_SNAKE_CASE :int=0.02 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :str=4 , SCREAMING_SNAKE_CASE :Any=None , ) -> int: '''simple docstring''' _a : str =parent _a : str =batch_size _a : str =seq_length _a : List[str] =is_training _a : List[Any] =use_input_mask _a : int =use_token_type_ids _a : List[str] =use_labels _a : Any =vocab_size _a : List[Any] =hidden_size _a : List[Any] =num_hidden_layers _a : Optional[Any] =num_attention_heads _a : Optional[int] =intermediate_size _a : Tuple =hidden_act _a : List[Any] =hidden_dropout_prob _a : str =attention_probs_dropout_prob _a : Tuple =max_position_embeddings _a : str =type_vocab_size _a : Dict =type_sequence_label_size _a : Any =initializer_range _a : Dict =num_labels _a : List[str] =num_choices _a : Union[str, Any] =scope _a : Any =vocab_size - 1 def __UpperCAmelCase ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' _a : Any =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : int =None if self.use_input_mask: _a : Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] ) _a : int =None if self.use_labels: _a : Any =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _a : List[str] =self.get_config() return config, input_ids, input_mask, token_labels def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' _a , _a , _a , _a : Optional[Any] =self.prepare_config_and_inputs() _a : List[Any] =True return config, input_ids, input_mask, token_labels def __UpperCAmelCase 
( self :int , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :List[Any] ) -> Any: '''simple docstring''' _a : Any =GPTNeoXModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : str =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Any ) -> Optional[Any]: '''simple docstring''' _a : int =True _a : Tuple =GPTNeoXModel(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : List[str] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :int ) -> int: '''simple docstring''' _a : Union[str, Any] =GPTNeoXForCausalLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : str =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any ) -> str: '''simple docstring''' _a : Union[str, Any] =self.num_labels _a : Optional[Any] =GPTNeoXForQuestionAnswering(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : Dict =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Any ) -> Union[str, Any]: '''simple docstring''' _a : Tuple =self.num_labels _a : Union[str, Any] =GPTNeoXForSequenceClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a : Optional[Any] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Dict ) -> Optional[Any]: '''simple docstring''' _a : Union[str, Any] =self.num_labels _a : List[Any] =GPTNeoXForTokenClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : Optional[int] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :int ) -> List[Any]: '''simple docstring''' _a : List[str] =True _a : List[str] 
=GPTNeoXForCausalLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() # first forward pass _a : Optional[Any] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE ) _a : str =outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _a : Dict =ids_tensor((self.batch_size, 3) , config.vocab_size ) _a : str =ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _a : str =torch.cat([input_ids, next_tokens] , dim=-1 ) _a : Any =torch.cat([input_mask, next_mask] , dim=-1 ) _a : List[str] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE ) _a : Tuple =output_from_no_past["""hidden_states"""][0] _a : Optional[Any] =model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )["""hidden_states"""][0] # select random slice _a : Optional[Any] =ids_tensor((1,) , output_from_past.shape[-1] ).item() _a : Any =output_from_no_past[:, -3:, random_slice_idx].detach() _a : int =output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def __UpperCAmelCase ( self :Optional[Any] ) -> List[Any]: '''simple docstring''' _a : List[Any] =self.prepare_config_and_inputs() _a , _a , _a , _a : List[Any] =config_and_inputs _a : Optional[int] ={"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class A__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : Tuple = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) __UpperCamelCase : List[str] = (GPTNeoXForCausalLM,) if is_torch_available() else () __UpperCamelCase : Optional[Any] = ( { "feature-extraction": GPTNeoXModel, "question-answering": GPTNeoXForQuestionAnswering, "text-classification": GPTNeoXForSequenceClassification, "text-generation": GPTNeoXForCausalLM, "token-classification": GPTNeoXForTokenClassification, "zero-shot": GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase : Dict = False __UpperCamelCase : int = False __UpperCamelCase : str = False __UpperCamelCase : Union[str, Any] = False def __UpperCAmelCase ( self :List[Any] ) -> List[Any]: '''simple docstring''' _a : List[Any] =GPTNeoXModelTester(self ) _a : Any =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=6_4 , num_attention_heads=8 ) def __UpperCAmelCase ( self :Any ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self :Any ) -> str: '''simple docstring''' _a , _a , _a , _a : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Union[str, Any] ) -> Tuple: '''simple docstring''' _a , _a , _a , _a : str =self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Union[str, Any] ) -> Any: '''simple docstring''' # This 
regression test was failing with PyTorch < 1.3 _a , _a , _a , _a : List[str] =self.model_tester.prepare_config_and_inputs_for_decoder() _a : str =None self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Tuple ) -> int: '''simple docstring''' _a , _a , _a , _a : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]: '''simple docstring''' _a : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' _a : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[Any] ) -> str: '''simple docstring''' _a : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[Any] ) -> Dict: '''simple docstring''' _a : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def __UpperCAmelCase ( self :Tuple ) -> str: '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Dict: '''simple docstring''' _a , _a : Any =self.model_tester.prepare_config_and_inputs_for_common() _a : Optional[Any] =ids_tensor([1, 1_0] , config.vocab_size ) _a : Dict =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights _a : Any =GPTNeoXModel(SCREAMING_SNAKE_CASE ) original_model.to(SCREAMING_SNAKE_CASE ) original_model.eval() _a : str =original_model(SCREAMING_SNAKE_CASE ).last_hidden_state _a : List[Any] =original_model(SCREAMING_SNAKE_CASE ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights _a : Union[str, Any] ={"""type""": scaling_type, """factor""": 10.0} _a : Dict =GPTNeoXModel(SCREAMING_SNAKE_CASE ) scaled_model.to(SCREAMING_SNAKE_CASE ) scaled_model.eval() _a : List[Any] =scaled_model(SCREAMING_SNAKE_CASE ).last_hidden_state _a : Tuple =scaled_model(SCREAMING_SNAKE_CASE ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-5 ) ) @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :List[str] ) -> List[Any]: '''simple docstring''' _a : Tuple =AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: _a : int =GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(SCREAMING_SNAKE_CASE ) _a : List[str] =tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 _a : Optional[int] ="""My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" _a : int =model.generate(**SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , max_new_tokens=2_0 ) _a : int =tokenizer.batch_decode(SCREAMING_SNAKE_CASE )[0] self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
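For completeness, an illustrative way to run the test module above; the file path follows the conventional transformers layout and is an assumption here:

import subprocess

# Hypothetical path; the slow tests additionally need RUN_SLOW=1 in the environment.
subprocess.run(
    ["pytest", "tests/models/gpt_neox/test_modeling_gpt_neox.py", "-q"],
    check=True,
)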
694
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''') def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict: hf_model.apply_weight_norm() _a : Any =checkpoint["""input_conv.weight_g"""] _a : Union[str, Any] =checkpoint["""input_conv.weight_v"""] _a : Optional[int] =checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): _a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"] _a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"] _a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): _a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"] _a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"] _a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"] _a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"] _a : Dict =checkpoint["""output_conv.1.weight_g"""] _a : str =checkpoint["""output_conv.1.weight_v"""] _a : Union[str, Any] =checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]: if config_path is not None: _a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase ) else: _a : str =SpeechTaHifiGanConfig() _a : Tuple =SpeechTaHifiGan(_UpperCAmelCase ) _a : int =torch.load(_UpperCAmelCase ) load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase ) _a : Dict =np.load(_UpperCAmelCase ) _a : Union[str, Any] =stats[0].reshape(-1 ) _a : Any =stats[1].reshape(-1 ) _a : Tuple =torch.from_numpy(_UpperCAmelCase ).float() _a : List[str] =torch.from_numpy(_UpperCAmelCase ).float() model.save_pretrained(_UpperCAmelCase ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(_UpperCAmelCase ) if __name__ == "__main__": A__: Optional[int] = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) A__: Tuple = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
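A minimal sketch of running a vocoder exported by the script above; the local path is a placeholder, and the class name is the standard transformers spelling SpeechT5HifiGan:

import torch
from transformers import SpeechT5HifiGan

# Placeholder path: whatever --pytorch_dump_folder_path was passed above.
vocoder = SpeechT5HifiGan.from_pretrained("./speecht5_hifigan")
spectrogram = torch.randn(140, vocoder.config.model_in_dim)  # dummy log-mel input
with torch.no_grad():
    waveform = vocoder(spectrogram)
print(waveform.shape)  # 1-D tensor of audio samples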
694
1
"""Check whether a string of brackets is balanced, using a stack."""


def is_balanced(s: str) -> bool:
    # Push openers; each closer must match the most recent unmatched opener.
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
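A non-interactive usage sketch for is_balanced above:

print(is_balanced("{[()]}"))  # True
print(is_balanced("{[(])}"))  # False: "]" closes "(" out of order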
694
'''simple docstring''' class A__ : def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]: '''simple docstring''' _a : List[str] =None _a : Optional[Any] =None _a : str =graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Optional[int] =len(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =None def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any: '''simple docstring''' if sources is int: _a : Tuple =[sources] if sinks is int: _a : Optional[int] =[sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return _a : Union[str, Any] =sources[0] _a : Tuple =sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: _a : Tuple =0 for i in sources: max_input_flow += sum(self.graph[i] ) _a : List[Any] =len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _a : Any =max_input_flow _a : List[str] =0 _a : List[str] =len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _a : str =max_input_flow _a : Optional[Any] =size - 1 def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int: '''simple docstring''' _a : Tuple =algorithm(self ) class A__ : def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict: '''simple docstring''' _a : List[str] =flow_network _a : List[Any] =flow_network.verticesCount _a : str =flow_network.sourceIndex _a : str =flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _a : List[Any] =flow_network.graph _a : Optional[int] =False def __UpperCAmelCase ( self :List[Any] ) -> List[str]: '''simple docstring''' if not self.executed: self._algorithm() _a : Any =True def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]: '''simple docstring''' pass class A__ ( UpperCAmelCase__ ): def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result _a : List[Any] =-1 def __UpperCAmelCase ( self :Dict ) -> Tuple: '''simple docstring''' if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class A__ ( UpperCAmelCase__ ): def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) _a : int =[[0] * self.verticies_count for i in range(self.verticies_count )] _a : Union[str, Any] =[0] * self.verticies_count _a : Optional[Any] =[0] * self.verticies_count def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' _a : int =self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += 
bandwidth # Relabel-to-front selection rule _a : Tuple =[ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _a : List[Any] =0 while i < len(SCREAMING_SNAKE_CASE ): _a : Any =vertices_list[i] _a : str =self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) _a : List[str] =0 else: i += 1 _a : Optional[int] =sum(self.preflow[self.source_index] ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' _a : List[str] =min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]: '''simple docstring''' _a : int =None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _a : Optional[Any] =self.heights[to_index] if min_height is not None: _a : Any =min_height + 1 if __name__ == "__main__": A__: str = [0] A__: Optional[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network A__: Union[str, Any] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate A__: List[str] = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
694
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
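The _LazyModule indirection above defers the real import until an attribute is first resolved; a minimal sketch of the effect from the caller's side, assuming a standard transformers install:

# the tokenizer module is only imported when the attribute is first accessed
from transformers import ByT5Tokenizer

tokenizer = ByT5Tokenizer()  # byte-level, so no vocabulary file is required
print(tokenizer("hello").input_ids)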
694
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
694
1
'''simple docstring'''
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
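The client above expects a sender already listening on the same host and port; a minimal counterpart server sketch (the payload file name is an assumption):

import socket

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((socket.gethostname(), 12312))
server.listen(1)
conn, _addr = server.accept()
conn.recv(1024)  # consume the client's greeting
with open("File_to_send", "rb") as in_file:  # hypothetical payload
    while chunk := in_file.read(1024):
        conn.send(chunk)
conn.close()  # closing the socket signals end-of-file to the client
server.close()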
694
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
694
1
'''simple docstring''' from sklearn.metrics import fa_score import datasets A__: List[str] = ''' The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) ''' A__: Tuple = ''' Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives. - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {\'f1\': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results[\'f1\'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results[\'f1\'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results[\'f1\'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'f1\': array([0.8, 0. , 0. ])} ''' A__: Optional[int] = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCAmelCase ( self :Dict ) -> Optional[int]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Optional[int]=None , SCREAMING_SNAKE_CASE :Optional[int]=1 , SCREAMING_SNAKE_CASE :str="binary" , SCREAMING_SNAKE_CASE :Optional[Any]=None ) -> List[Any]: '''simple docstring''' _a : Dict =fa_score( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , pos_label=SCREAMING_SNAKE_CASE , average=SCREAMING_SNAKE_CASE , sample_weight=SCREAMING_SNAKE_CASE ) return {"f1": float(SCREAMING_SNAKE_CASE ) if score.size == 1 else score}
694
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class A__ ( unittest.TestCase ): def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple: '''simple docstring''' _a : Optional[Any] =parent _a : List[str] =batch_size _a : List[str] =seq_length _a : List[Any] =is_training _a : Optional[int] =use_attention_mask _a : List[Any] =use_token_type_ids _a : List[Any] =use_labels _a : Optional[Any] =vocab_size _a : str =hidden_size _a : List[Any] =num_hidden_layers _a : List[Any] =num_attention_heads _a : Union[str, Any] =intermediate_size _a : int =hidden_act _a : List[str] =hidden_dropout_prob _a : Optional[int] =attention_probs_dropout_prob _a : Dict =max_position_embeddings _a : Any =type_vocab_size _a : str =type_sequence_label_size _a : str =initializer_range _a : List[str] =num_choices def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict: '''simple docstring''' _a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Dict =None if self.use_attention_mask: _a : Any =random_attention_mask([self.batch_size, self.seq_length] ) _a : Optional[int] =None if self.use_token_type_ids: _a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a : Union[str, Any] =RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' _a : Tuple =self.prepare_config_and_inputs() _a , _a , _a , _a : List[Any] =config_and_inputs _a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def 
__UpperCAmelCase ( self :int ) -> str: '''simple docstring''' _a : List[Any] =self.prepare_config_and_inputs() _a , _a , _a , _a : Optional[int] =config_and_inputs _a : Tuple =True _a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : Union[str, Any] = True __UpperCamelCase : Dict = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self :List[str] ) -> Optional[int]: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self ) @slow def __UpperCAmelCase ( self :str ) -> int: '''simple docstring''' for model_class_name in self.all_model_classes: _a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Dict =model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_flax class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Any ) -> str: '''simple docstring''' _a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Dict =model(SCREAMING_SNAKE_CASE )[0] _a : List[Any] =[1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. _a : Any =np.array( [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def __UpperCAmelCase ( self :int ) -> int: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. _a : str =np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
694
1
'''simple docstring''' import argparse from collections import defaultdict import yaml A__: Dict = '''docs/source/en/_toctree.yml''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ) -> Optional[Any]: _a : Tuple =defaultdict(_UpperCAmelCase ) _a : Any =[] _a : List[str] =[] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(_UpperCAmelCase ) _a : Union[str, Any] =new_doc_list _a : str =[key for key, value in counts.items() if value > 1] _a : Any =[] for duplicate_key in duplicates: _a : Optional[Any] =list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(_UpperCAmelCase ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) _a : Any =sorted(_UpperCAmelCase ,key=lambda _UpperCAmelCase : s["title"].lower() ) # "overview" gets special treatment and is always first if len(_UpperCAmelCase ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(_UpperCAmelCase ) # Sort return overview_doc def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Union[str, Any]=False ) -> Tuple: with open(_UpperCAmelCase ,encoding="""utf-8""" ) as f: _a : Tuple =yaml.safe_load(f.read() ) # Get to the API doc _a : Any =0 while content[api_idx]["title"] != "API": api_idx += 1 _a : Union[str, Any] =content[api_idx]["""sections"""] # Then to the model doc _a : str =0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 _a : Any =api_doc[scheduler_idx]["""sections"""] _a : int =clean_doc_toc(_UpperCAmelCase ) _a : Union[str, Any] =False if new_scheduler_doc != scheduler_doc: _a : int =True if overwrite: _a : List[Any] =new_scheduler_doc if diff: if overwrite: _a : List[Any] =api_doc with open(_UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ) as f: f.write(yaml.dump(_UpperCAmelCase ,allow_unicode=_UpperCAmelCase ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int=False ) -> Optional[Any]: with open(_UpperCAmelCase ,encoding="""utf-8""" ) as f: _a : Tuple =yaml.safe_load(f.read() ) # Get to the API doc _a : Union[str, Any] =0 while content[api_idx]["title"] != "API": api_idx += 1 _a : Optional[Any] =content[api_idx]["""sections"""] # Then to the model doc _a : str =0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 _a : str =False _a : Any =api_doc[pipeline_idx]["""sections"""] _a : Union[str, Any] =[] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: _a : List[Any] =pipeline_doc["""section"""] _a : List[Any] =clean_doc_toc(_UpperCAmelCase ) if overwrite: _a : List[str] =new_sub_pipeline_doc new_pipeline_docs.append(_UpperCAmelCase ) # sort overall pipeline doc _a : Optional[Any] =clean_doc_toc(_UpperCAmelCase ) if new_pipeline_docs != pipeline_docs: _a : List[Any] =True if overwrite: _a : str =new_pipeline_docs if diff: if overwrite: _a : Optional[Any] =api_doc with 
open(_UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ) as f: f.write(yaml.dump(_UpperCAmelCase ,allow_unicode=_UpperCAmelCase ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": A__: Optional[int] = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') A__: Tuple = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
694
'''simple docstring''' import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A__: Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : str=None ) -> Union[str, Any]: return field(default_factory=lambda: default ,metadata=_UpperCAmelCase ) @dataclass class A__ : __UpperCamelCase : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __UpperCamelCase : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __UpperCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __UpperCamelCase : str = field( default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , ) __UpperCamelCase : str = field( default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __UpperCamelCase : str = field( default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , ) __UpperCamelCase : str = field( default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , ) __UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' warnings.warn( f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , SCREAMING_SNAKE_CASE , ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[str]: '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
694
1
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version A__: Dict = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class A__ : __UpperCamelCase : Optional[str] = field( default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} ) __UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) __UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "The column name of the images in the files."} ) __UpperCamelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the training data."} ) __UpperCamelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the validation data."} ) __UpperCamelCase : Optional[float] = field( default=0.15 , metadata={"help": "Percent to split off of train for validation."} ) __UpperCamelCase : Optional[int] = field( default=UpperCAmelCase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) __UpperCamelCase : Optional[int] = field( default=UpperCAmelCase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' _a : List[str] ={} if self.train_dir is not None: _a : List[str] =self.train_dir if self.validation_dir is not None: _a : Dict =self.validation_dir _a : int =data_files if data_files else None @dataclass class A__ : __UpperCamelCase : str = field( default=UpperCAmelCase__ , metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) } , ) __UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} ) __UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. 
Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) } , ) __UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) __UpperCamelCase : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) __UpperCamelCase : str = field(default=UpperCAmelCase__ , metadata={"help": "Name or path of preprocessor config."} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) __UpperCamelCase : float = field( default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether or not to train with normalized pixel values as target."} ) @dataclass class A__ ( UpperCAmelCase__ ): __UpperCamelCase : float = field( default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> Any: _a : List[str] =torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def SCREAMING_SNAKE_CASE_ ( ) -> Dict: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _a : Dict =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _a , _a , _a : Optional[Any] =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _a , _a , _a : Union[str, Any] =parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" ,_UpperCAmelCase ,_UpperCAmelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _a : int =training_args.get_process_log_level() logger.setLevel(_UpperCAmelCase ) transformers.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. 
_a : int =None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _a : Dict =get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. _a : str =load_dataset( data_args.dataset_name ,data_args.dataset_config_name ,data_files=data_args.data_files ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,) # If we don't have a validation split, split off a percentage of train as validation. _a : Union[str, Any] =None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split ,_UpperCAmelCase ) and data_args.train_val_split > 0.0: _a : Optional[Any] =ds["""train"""].train_test_split(data_args.train_val_split ) _a : Optional[int] =split["""train"""] _a : Tuple =split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _a : Optional[int] ={ """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: _a : Optional[Any] =ViTMAEConfig.from_pretrained(model_args.config_name ,**_UpperCAmelCase ) elif model_args.model_name_or_path: _a : Any =ViTMAEConfig.from_pretrained(model_args.model_name_or_path ,**_UpperCAmelCase ) else: _a : List[str] =ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F"Overriding config: {model_args.config_overrides}" ) config.update_from_string(model_args.config_overrides ) logger.info(F"New config: {config}" ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _a : Optional[int] =ViTImageProcessor.from_pretrained(model_args.image_processor_name ,**_UpperCAmelCase ) elif model_args.model_name_or_path: _a : Optional[Any] =ViTImageProcessor.from_pretrained(model_args.model_name_or_path ,**_UpperCAmelCase ) else: _a : Optional[int] =ViTImageProcessor() # create model if model_args.model_name_or_path: _a : Any =ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=_UpperCAmelCase ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,) else: logger.info("""Training new model from scratch""" ) _a : str =ViTMAEForPreTraining(_UpperCAmelCase ) if training_args.do_train: _a : Optional[int] =ds["""train"""].column_names else: _a : int =ds["""validation"""].column_names if data_args.image_column_name is not None: _a : Optional[Any] =data_args.image_column_name elif "image" in column_names: _a : Any ="""image""" elif "img" in column_names: _a : Any ="""img""" else: _a : str =column_names[0] # 
transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _a : str =image_processor.size["""shortest_edge"""] else: _a : Optional[int] =(image_processor.size["""height"""], image_processor.size["""width"""]) _a : Tuple =Compose( [ Lambda(lambda _UpperCAmelCase : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(_UpperCAmelCase ,scale=(0.2, 1.0) ,interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean ,std=image_processor.image_std ), ] ) def preprocess_images(_UpperCAmelCase : int ): _a : Union[str, Any] =[transforms(_UpperCAmelCase ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: _a : int =ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_UpperCAmelCase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: _a : int =( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_UpperCAmelCase ) # Compute absolute learning rate _a : Optional[Any] =( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _a : str =training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer _a : Any =Trainer( model=_UpperCAmelCase ,args=_UpperCAmelCase ,train_dataset=ds["""train"""] if training_args.do_train else None ,eval_dataset=ds["""validation"""] if training_args.do_eval else None ,tokenizer=_UpperCAmelCase ,data_collator=_UpperCAmelCase ,) # Training if training_args.do_train: _a : List[Any] =None if training_args.resume_from_checkpoint is not None: _a : List[Any] =training_args.resume_from_checkpoint elif last_checkpoint is not None: _a : str =last_checkpoint _a : str =trainer.train(resume_from_checkpoint=_UpperCAmelCase ) trainer.save_model() trainer.log_metrics("""train""" ,train_result.metrics ) trainer.save_metrics("""train""" ,train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _a : int =trainer.evaluate() trainer.log_metrics("""eval""" ,_UpperCAmelCase ) trainer.save_metrics("""eval""" ,_UpperCAmelCase ) # Write model card and (optionally) push to hub _a : Tuple ={ """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**_UpperCAmelCase ) else: trainer.create_model_card(**_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ) -> Union[str, Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
694
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class A__ ( UpperCAmelCase__ ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Distribution , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :List[Any]=0 ) -> List[str]: '''simple docstring''' _a : int =1.0 if scale is None else scale _a : Optional[Any] =0.0 if loc is None else loc super().__init__(SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE )] ) @property def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return self.base_dist.mean * self.scale + self.loc @property def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return self.base_dist.variance * self.scale**2 @property def __UpperCAmelCase ( self :Any ) -> List[str]: '''simple docstring''' return self.variance.sqrt() class A__ ( nn.Module ): def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Callable[..., Tuple[torch.Tensor]] , **SCREAMING_SNAKE_CASE :Dict ) -> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) _a : Tuple =args_dim _a : Tuple =nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] ) _a : Dict =domain_map def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Tuple[torch.Tensor]: '''simple docstring''' _a : Tuple =[proj(SCREAMING_SNAKE_CASE ) for proj in self.proj] return self.domain_map(*SCREAMING_SNAKE_CASE ) class A__ ( nn.Module ): def __init__( self :Dict , SCREAMING_SNAKE_CASE :Tuple ) -> int: '''simple docstring''' super().__init__() _a : List[Any] =function def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[int] , *SCREAMING_SNAKE_CASE :int ) -> List[Any]: '''simple docstring''' return self.function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) class A__ : __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__( self :Any , SCREAMING_SNAKE_CASE :int = 1 ) -> None: '''simple docstring''' _a : Any =dim _a : List[Any] ={k: dim * self.args_dim[k] for k in self.args_dim} def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]: '''simple docstring''' if self.dim == 1: return self.distribution_class(*SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , ) -> Distribution: '''simple docstring''' _a : str =self._base_distribution(SCREAMING_SNAKE_CASE ) if loc is None and scale is None: return distr else: return AffineTransformed(SCREAMING_SNAKE_CASE , loc=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , event_dim=self.event_dim ) @property def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' return () if self.dim == 1 else (self.dim,) @property def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return len(self.event_shape ) @property def __UpperCAmelCase ( self :Any ) -> float: '''simple docstring''' return 0.0 def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> 
nn.Module: '''simple docstring''' return ParameterProjection( in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def __UpperCAmelCase ( self :int , *SCREAMING_SNAKE_CASE :torch.Tensor ) -> Any: '''simple docstring''' raise NotImplementedError() @staticmethod def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor: '''simple docstring''' return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0 class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def __UpperCAmelCase ( cls :int , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Union[str, Any]: '''simple docstring''' _a : Tuple =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) _a : Optional[Any] =2.0 + cls.squareplus(SCREAMING_SNAKE_CASE ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Dict: '''simple docstring''' _a : List[str] =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Optional[int]: '''simple docstring''' _a : int =cls.squareplus(SCREAMING_SNAKE_CASE ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Distribution: '''simple docstring''' _a , _a : Any =distr_args if self.dim == 1: return self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None ) -> Distribution: '''simple docstring''' _a , _a : Optional[int] =distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
694
1
'''simple docstring''' import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version A__: Optional[Any] = version.parse(importlib_metadata.version('''nltk''')) if NLTK_VERSION >= version.Version('''3.6.4'''): from nltk import word_tokenize A__: Tuple = '''\ @inproceedings{banarjee2005, title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments}, author = {Banerjee, Satanjeev and Lavie, Alon}, booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization}, month = jun, year = {2005}, address = {Ann Arbor, Michigan}, publisher = {Association for Computational Linguistics}, url = {https://www.aclweb.org/anthology/W05-0909}, pages = {65--72}, } ''' A__: Optional[int] = '''\ METEOR, an automatic metric for machine translation evaluation that is based on a generalized concept of unigram matching between the machine-produced translation and human-produced reference translations. Unigrams can be matched based on their surface forms, stemmed forms, and meanings; furthermore, METEOR can be easily extended to include more advanced matching strategies. Once all generalized unigram matches between the two strings have been found, METEOR computes a score for this matching using a combination of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture how well-ordered the matched words in the machine translation are in relation to the reference. METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic data and 0.331 on the Chinese data. This is shown to be an improvement on using simply unigram-precision, unigram-recall and their harmonic F1 combination. ''' A__: Dict = ''' Computes METEOR score of translated segments against one or more references. Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. alpha: Parameter for controlling relative weights of precision and recall. default: 0.9 beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3 gamma: Relative weight assigned to fragmentation penalty. default: 0.5 Returns: \'meteor\': meteor score. 
Examples: >>> meteor = datasets.load_metric(\'meteor\') >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"] >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"] >>> results = meteor.compute(predictions=predictions, references=references) >>> print(round(results["meteor"], 4)) 0.6944 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCAmelCase ( self :Dict ) -> Dict: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[ """https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""", """https://en.wikipedia.org/wiki/METEOR""", ] , ) def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Any ) -> int: '''simple docstring''' import nltk nltk.download("""wordnet""" ) if NLTK_VERSION >= version.Version("""3.6.5""" ): nltk.download("""punkt""" ) if NLTK_VERSION >= version.Version("""3.6.6""" ): nltk.download("""omw-1.4""" ) def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=0.9 , SCREAMING_SNAKE_CASE :Union[str, Any]=3 , SCREAMING_SNAKE_CASE :List[str]=0.5 ) -> Tuple: '''simple docstring''' if NLTK_VERSION >= version.Version("""3.6.5""" ): _a : List[Any] =[ meteor_score.single_meteor_score( word_tokenize(SCREAMING_SNAKE_CASE ) , word_tokenize(SCREAMING_SNAKE_CASE ) , alpha=SCREAMING_SNAKE_CASE , beta=SCREAMING_SNAKE_CASE , gamma=SCREAMING_SNAKE_CASE ) for ref, pred in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] else: _a : str =[ meteor_score.single_meteor_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , alpha=SCREAMING_SNAKE_CASE , beta=SCREAMING_SNAKE_CASE , gamma=SCREAMING_SNAKE_CASE ) for ref, pred in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] return {"meteor": np.mean(SCREAMING_SNAKE_CASE )}
694
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    # set the bit at `position` to 1
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    # set the bit at `position` to 0
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    # toggle the bit at `position`
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    # True if the bit at `position` is 1
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    # return the bit at `position` as 0 or 1
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
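Worked values for the helpers, under the restored names above:

assert set_bit(0b1100, 1) == 0b1110
assert clear_bit(0b1100, 2) == 0b1000
assert flip_bit(0b1100, 0) == 0b1101
assert is_bit_set(0b1100, 3) is True
assert get_bit(0b1100, 1) == 0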
694
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A__: Tuple = logging.get_logger(__name__) A__: Any = { '''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Union[str, Any] = "poolformer" def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Union[str, Any]=3 , SCREAMING_SNAKE_CASE :Union[str, Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=1_6 , SCREAMING_SNAKE_CASE :Any=3 , SCREAMING_SNAKE_CASE :str=4.0 , SCREAMING_SNAKE_CASE :Optional[Any]=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE :Any=[6_4, 1_2_8, 3_2_0, 5_1_2] , SCREAMING_SNAKE_CASE :Optional[int]=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE :Union[str, Any]=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE :Optional[int]=[2, 1, 1, 1] , SCREAMING_SNAKE_CASE :Optional[int]=4 , SCREAMING_SNAKE_CASE :List[str]=0.0 , SCREAMING_SNAKE_CASE :List[str]="gelu" , SCREAMING_SNAKE_CASE :List[Any]=True , SCREAMING_SNAKE_CASE :Union[str, Any]=1e-5 , SCREAMING_SNAKE_CASE :Tuple=0.02 , **SCREAMING_SNAKE_CASE :Any , ) -> List[str]: '''simple docstring''' _a : Union[str, Any] =num_channels _a : Optional[int] =patch_size _a : List[str] =stride _a : Dict =padding _a : Tuple =pool_size _a : Dict =hidden_sizes _a : Optional[Any] =mlp_ratio _a : Optional[int] =depths _a : Optional[int] =patch_sizes _a : Union[str, Any] =strides _a : Optional[int] =num_encoder_blocks _a : str =drop_path_rate _a : str =hidden_act _a : Tuple =use_layer_scale _a : str =layer_scale_init_value _a : int =initializer_range super().__init__(**SCREAMING_SNAKE_CASE ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Optional[Any] = version.parse("1.11" ) @property def __UpperCAmelCase ( self :str ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __UpperCAmelCase ( self :Tuple ) -> float: '''simple docstring''' return 2e-3
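The obfuscated classes above correspond to transformers' PoolFormerConfig and its ONNX config; a brief instantiation sketch under that assumption:

from transformers import PoolFormerConfig

# defaults mirror the signature above: four encoder blocks over four stages
config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
print(config.num_encoder_blocks)  # 4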
694
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
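Quick checks of the distance helpers, using the names restored above:

assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance([1.5, -1], [2, 3]) == 4.5
# mismatched dimensions raise ValueError; non-numeric entries raise TypeError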
694
1
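A readable sketch of the Manhattan-distance routine in the row above, with the obfuscated names replaced by illustrative ones (the point-validation helper is elided here):

def manhattan_distance(point_a: list, point_b: list) -> float:
    # Sum of absolute coordinate differences between two n-dimensional points.
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))

print(manhattan_distance([1, 1], [4, 5]))  # 7.0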
'''simple docstring''' import argparse import os import re import packaging.version A__: Optional[Any] = '''examples/''' A__: Tuple = { '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } A__: Union[str, Any] = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } A__: Union[str, Any] = '''README.md''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : int ,_UpperCAmelCase : str ) -> str: with open(_UpperCAmelCase ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: _a : Optional[int] =f.read() _a , _a : Union[str, Any] =REPLACE_PATTERNS[pattern] _a : List[str] =replace.replace("""VERSION""" ,_UpperCAmelCase ) _a : Optional[Any] =re_pattern.sub(_UpperCAmelCase ,_UpperCAmelCase ) with open(_UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: f.write(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ) -> Optional[Any]: for folder, directories, fnames in os.walk(_UpperCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) ,_UpperCAmelCase ,pattern="""examples""" ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : Optional[int]=False ) -> Any: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) if not patch: update_version_in_examples(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]: _a : Tuple ="""🤗 Transformers currently provides the following architectures""" _a : Tuple ="""1. Want to contribute a new model?""" with open(_UpperCAmelCase ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: _a : Dict =f.readlines() # Find the start of the list. _a : Dict =0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _a : Union[str, Any] =start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): _a : Optional[int] =lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" ,"""https://huggingface.co/docs/transformers/model_doc""" ,) index += 1 with open(_UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: f.writelines(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: with open(REPLACE_FILES["""init"""] ,"""r""" ) as f: _a : Tuple =f.read() _a : List[str] =REPLACE_PATTERNS["""init"""][0].search(_UpperCAmelCase ).groups()[0] return packaging.version.parse(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any]=False ) -> List[str]: _a : Optional[Any] =get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: _a : Dict =default_version.base_version elif patch: _a : List[str] =F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}" else: _a : Dict =F"{default_version.major}.{default_version.minor + 1}.0" # Now let's ask nicely if that's the right one. _a : Any =input(F"Which version are you releasing? [{default_version}]" ) if len(_UpperCAmelCase ) == 0: _a : Tuple =default_version print(F"Updating version to {version}." ) global_version_update(_UpperCAmelCase ,patch=_UpperCAmelCase ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: _a : List[str] =get_version() _a : Union[str, Any] =F"{current_version.major}.{current_version.minor + 1}.0.dev0" _a : Dict =current_version.base_version # Check with the user we got that right. _a : Union[str, Any] =input(F"Which version are we developing now? [{dev_version}]" ) if len(_UpperCAmelCase ) == 0: _a : Dict =dev_version print(F"Updating version to {version}." ) global_version_update(_UpperCAmelCase ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": A__: Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') A__: Union[str, Any] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
694
'''simple docstring''' from __future__ import annotations class A__ : def __init__( self :str , SCREAMING_SNAKE_CASE :int ) -> None: '''simple docstring''' _a : int =order # a_{0} ... a_{k} _a : Optional[Any] =[1.0] + [0.0] * order # b_{0} ... b_{k} _a : Tuple =[1.0] + [0.0] * order # x[n-1] ... x[n-k] _a : List[Any] =[0.0] * self.order # y[n-1] ... y[n-k] _a : Tuple =[0.0] * self.order def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :list[float] , SCREAMING_SNAKE_CASE :list[float] ) -> None: '''simple docstring''' if len(SCREAMING_SNAKE_CASE ) < self.order: _a : int =[1.0, *a_coeffs] if len(SCREAMING_SNAKE_CASE ) != self.order + 1: _a : int =( f"Expected a_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}" ) raise ValueError(SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) != self.order + 1: _a : Optional[Any] =( f"Expected b_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}" ) raise ValueError(SCREAMING_SNAKE_CASE ) _a : List[str] =a_coeffs _a : Union[str, Any] =b_coeffs def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :float ) -> float: '''simple docstring''' _a : str =0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) _a : Any =(result + self.b_coeffs[0] * sample) / self.a_coeffs[0] _a : str =self.input_history[:-1] _a : Optional[Any] =self.output_history[:-1] _a : Optional[int] =sample _a : Tuple =result return result
694
1
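The release script in the row above bumps version strings by regex substitution; a minimal sketch of that idea, assuming a module that stores its version as a plain __version__ assignment (the pattern mirrors the script's "init" rule):

import re

# Matches a line like: __version__ = "4.0.0.dev0"
VERSION_RE = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)

def bump_version(source: str, new_version: str) -> str:
    # Rewrite the __version__ assignment, leaving the rest of the file untouched.
    return VERSION_RE.sub(f'__version__ = "{new_version}"', source)

print(bump_version('__version__ = "4.0.0.dev0"\n', "4.0.0"))  # __version__ = "4.0.0"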
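And a de-obfuscated sketch of the IIR filter's per-sample update from the same row (direct form I; names and the list-based history shift are illustrative, inferred from the record):

def iir_step(sample: float, a: list, b: list, x_hist: list, y_hist: list) -> float:
    # y[n] = (b[0]*x[n] + sum_i(b[i]*x[n-i] - a[i]*y[n-i])) / a[0]
    order = len(a) - 1
    acc = sum(b[i] * x_hist[i - 1] - a[i] * y_hist[i - 1] for i in range(1, order + 1))
    result = (acc + b[0] * sample) / a[0]
    # Shift the histories: the newest value goes to index 0.
    x_hist[:] = [sample] + x_hist[:-1]
    y_hist[:] = [result] + y_hist[:-1]
    return result

a, b = [1.0, -0.5], [0.5, 0.5]              # first-order coefficients, chosen for illustration
x_hist, y_hist = [0.0], [0.0]               # zeroed histories of length `order`
print(iir_step(1.0, a, b, x_hist, y_hist))  # 0.5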
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__: Optional[int] = { '''configuration_nllb_moe''': [ '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NllbMoeConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: List[str] = [ '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NllbMoeForConditionalGeneration''', '''NllbMoeModel''', '''NllbMoePreTrainedModel''', '''NllbMoeTop2Router''', '''NllbMoeSparseMLP''', ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys A__: str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
694
'''simple docstring''' from queue import PriorityQueue from typing import Any import numpy as np def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : dict ,_UpperCAmelCase : str ,_UpperCAmelCase : set ,_UpperCAmelCase : set ,_UpperCAmelCase : dict ,_UpperCAmelCase : dict ,_UpperCAmelCase : PriorityQueue ,_UpperCAmelCase : dict ,_UpperCAmelCase : float | int ,) -> float | int: for nxt, d in graph[v]: if nxt in visited_forward: continue _a : Dict =cst_fwd.get(_UpperCAmelCase ,np.inf ) _a : int =cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) _a : Tuple =new_cost_f _a : Optional[Any] =v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: _a : str =cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ,_UpperCAmelCase : dict ,_UpperCAmelCase : dict ) -> int: _a : Optional[Any] =-1 _a : List[str] =set() _a : Optional[int] =set() _a : Optional[int] ={source: 0} _a : List[str] ={destination: 0} _a : Union[str, Any] ={source: None} _a : Dict ={destination: None} _a : PriorityQueue[Any] =PriorityQueue() _a : PriorityQueue[Any] =PriorityQueue() _a : Optional[int] =np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): _a , _a : str =queue_forward.get() visited_forward.add(_UpperCAmelCase ) _a , _a : List[Any] =queue_backward.get() visited_backward.add(_UpperCAmelCase ) _a : int =pass_and_relaxation( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,) _a : Any =pass_and_relaxation( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: _a : Any =shortest_distance return shortest_path_distance A__: Union[str, Any] = { '''B''': [['''C''', 1]], '''C''': [['''D''', 1]], '''D''': [['''F''', 1]], '''E''': [['''B''', 1], ['''G''', 2]], '''F''': [], '''G''': [['''F''', 1]], } A__: str = { '''B''': [['''E''', 1]], '''C''': [['''B''', 1]], '''D''': [['''C''', 1]], '''F''': [['''D''', 1], ['''G''', 1]], '''E''': [[None, np.inf]], '''G''': [['''E''', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
694
1
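For contrast with the bidirectional search in the row above, here is a plain one-directional Dijkstra over the same adjacency format (node -> [[neighbor, weight], ...]); the bidirectional variant runs this relaxation from both ends and stops once the two frontiers' combined best cost can no longer improve the meeting point. Names here are illustrative:

from queue import PriorityQueue

def dijkstra(graph: dict, source: str, destination: str) -> int:
    # Standard relaxation with a priority queue; stale queue entries are harmless
    # because the first pop of any node already carries its final distance.
    dist = {source: 0}
    pq = PriorityQueue()
    pq.put((0, source))
    while not pq.empty():
        d, v = pq.get()
        if v == destination:
            return d
        for nxt, w in graph.get(v, []):
            if d + w < dist.get(nxt, float("inf")):
                dist[nxt] = d + w
                pq.put((dist[nxt], nxt))
    return -1  # unreachable, matching the record's -1 default

graph = {"B": [["C", 1]], "C": [["D", 1]], "D": [["F", 1]], "F": []}
print(dijkstra(graph, "B", "F"))  # 3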
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[int] ,_UpperCAmelCase : str ) -> list[int]: _a : List[Any] =int(_UpperCAmelCase ) # Initialize Result _a : List[str] =[] # Traverse through all denomination for denomination in reversed(_UpperCAmelCase ): # Find denominations while int(_UpperCAmelCase ) >= int(_UpperCAmelCase ): total_value -= int(_UpperCAmelCase ) answer.append(_UpperCAmelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": A__: Optional[Any] = [] A__: Union[str, Any] = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): A__: Any = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F"Denomination {i}: ").strip())) A__: Tuple = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter A__: int = [1, 2, 5, 10, 20, 50, 100, 500, 2000] A__: List[Any] = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F"Following is minimal change for {value}: ") A__: Union[str, Any] = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
694
'''simple docstring''' from math import factorial def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 100 ) -> int: return sum(map(_UpperCAmelCase ,str(factorial(_UpperCAmelCase ) ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
694
1
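A cleaned-up sketch of the greedy change-making loop in the row above; sorting in descending order is my addition (the record assumes denominations arrive ascending and iterates them reversed). Greedy is optimal for canonical coin systems like the INR denominations used there:

def find_minimum_change(denominations: list, value: int) -> list:
    # Repeatedly take the largest coin that still fits.
    answer = []
    for coin in sorted(denominations, reverse=True):
        while value >= coin:
            value -= coin
            answer.append(coin)
    return answer

print(find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987))
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]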
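The factorial digit-sum one-liner from the same row (Project Euler 20), unpacked for readability:

from math import factorial

def factorial_digit_sum(num: int = 100) -> int:
    # Sum the decimal digits of num!.
    return sum(int(digit) for digit in str(factorial(num)))

print(factorial_digit_sum(10))  # 27, since 10! = 3628800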
'''simple docstring''' from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class A__ : def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :int=1_3 , SCREAMING_SNAKE_CASE :str=7 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Tuple=9_9 , SCREAMING_SNAKE_CASE :str=3_2 , SCREAMING_SNAKE_CASE :Optional[int]=2 , SCREAMING_SNAKE_CASE :Tuple=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :str=1_6 , SCREAMING_SNAKE_CASE :Dict=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :List[Any]=True , SCREAMING_SNAKE_CASE :Tuple="None" , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Optional[Any]=4 , SCREAMING_SNAKE_CASE :Tuple=None , ) -> Any: '''simple docstring''' _a : int =parent _a : str =batch_size _a : List[str] =seq_length _a : Any =is_training _a : int =use_input_mask _a : Any =use_token_type_ids _a : List[str] =use_labels _a : Optional[Any] =vocab_size _a : Tuple =hidden_size _a : List[str] =num_hidden_layers _a : Tuple =num_attention_heads _a : Optional[Any] =intermediate_size _a : Union[str, Any] =hidden_act _a : List[str] =hidden_dropout_prob _a : List[str] =attention_probs_dropout_prob _a : int =max_position_embeddings _a : List[str] =type_vocab_size _a : Optional[Any] =type_sequence_label_size _a : Optional[int] =initializer_range _a : Tuple =num_labels _a : Dict =num_choices _a : Union[str, Any] =relative_attention _a : Tuple =position_biased_input _a : List[str] =pos_att_type _a : Tuple =scope def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' _a : Any =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : str =None if self.use_input_mask: _a : Dict =random_attention_mask([self.batch_size, self.seq_length] ) _a : Dict =None if self.use_token_type_ids: _a : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a : Union[str, Any] =None _a : Any =None _a : Union[str, Any] =None if self.use_labels: _a : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a : Dict =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _a : Optional[Any] =DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , 
initializer_range=self.initializer_range , return_dict=SCREAMING_SNAKE_CASE , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :List[str] ) -> str: '''simple docstring''' _a : int =TFDebertaVaModel(config=SCREAMING_SNAKE_CASE ) _a : List[Any] ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _a : Optional[int] =[input_ids, input_mask] _a : Union[str, Any] =model(SCREAMING_SNAKE_CASE ) _a : Dict =model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any ) -> str: '''simple docstring''' _a : int =TFDebertaVaForMaskedLM(config=SCREAMING_SNAKE_CASE ) _a : str ={ """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _a : int =model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :List[str] ) -> Union[str, Any]: '''simple docstring''' _a : Tuple =self.num_labels _a : Union[str, Any] =TFDebertaVaForSequenceClassification(config=SCREAMING_SNAKE_CASE ) _a : Union[str, Any] ={ """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _a : Union[str, Any] =model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :List[Any] ) -> Optional[int]: '''simple docstring''' _a : Optional[Any] =self.num_labels _a : Union[str, Any] =TFDebertaVaForTokenClassification(config=SCREAMING_SNAKE_CASE ) _a : List[Any] ={ """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _a : List[str] =model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]: '''simple docstring''' _a : str =TFDebertaVaForQuestionAnswering(config=SCREAMING_SNAKE_CASE ) _a : List[Any] ={ """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _a : Tuple =model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self :Tuple ) -> int: '''simple docstring''' _a : List[Any] =self.prepare_config_and_inputs() ( ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ) : Optional[int] =config_and_inputs _a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class A__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : List[Any] = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) __UpperCamelCase : str = ( { "feature-extraction": TFDebertaVaModel, "fill-mask": TFDebertaVaForMaskedLM, "question-answering": TFDebertaVaForQuestionAnswering, "text-classification": TFDebertaVaForSequenceClassification, "token-classification": TFDebertaVaForTokenClassification, "zero-shot": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) __UpperCamelCase : Dict = False __UpperCamelCase : List[str] = False def __UpperCAmelCase ( self :str ) -> Dict: '''simple docstring''' _a : Optional[Any] =TFDebertaVaModelTester(self ) _a : Optional[int] =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=3_7 ) def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self :str ) -> int: '''simple docstring''' _a : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Tuple ) -> Dict: '''simple docstring''' _a : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Tuple ) -> Optional[Any]: '''simple docstring''' _a : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :List[Any] ) -> List[str]: '''simple docstring''' _a : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int ) -> str: '''simple docstring''' _a : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE ) @slow def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]: '''simple docstring''' _a : List[Any] =TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_tf class A__ ( unittest.TestCase ): @unittest.skip(reason="""Model not available yet""" ) def __UpperCAmelCase ( self :str ) -> List[str]: '''simple docstring''' pass @slow def __UpperCAmelCase ( self :str ) -> int: '''simple docstring''' _a : Dict =TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" ) _a : Any =tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) _a : Optional[Any] =tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _a : Optional[int] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )[0] _a : List[Any] =tf.constant( [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] ) tf.debugging.assert_near(output[:, 1:4, 
1:4] , SCREAMING_SNAKE_CASE , atol=1e-4 )
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ) -> list: _a : Tuple =len(_UpperCAmelCase ) _a : str =[] for i in range(len(_UpperCAmelCase ) - pat_len + 1 ): _a : int =True for j in range(_UpperCAmelCase ): if s[i + j] != pattern[j]: _a : int =False break if match_found: position.append(_UpperCAmelCase ) return position if __name__ == "__main__": assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3] print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
694
1
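An equivalent slice-based sketch of the naive O(n*m) pattern scan in the row above (illustrative names):

def naive_pattern_search(text: str, pattern: str) -> list:
    # Compare the pattern against every window of the text.
    m = len(pattern)
    return [i for i in range(len(text) - m + 1) if text[i : i + m] == pattern]

assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))  # [4, 10, 18]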
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class A__ : __UpperCamelCase : Dict = MBartConfig __UpperCamelCase : Dict = {} __UpperCamelCase : Dict = "gelu" def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :List[str]=1_3 , SCREAMING_SNAKE_CASE :Dict=7 , SCREAMING_SNAKE_CASE :str=True , SCREAMING_SNAKE_CASE :Dict=False , SCREAMING_SNAKE_CASE :Tuple=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :List[Any]=2 , SCREAMING_SNAKE_CASE :int=4 , SCREAMING_SNAKE_CASE :str=3_7 , SCREAMING_SNAKE_CASE :int=0.1 , SCREAMING_SNAKE_CASE :Any=0.1 , SCREAMING_SNAKE_CASE :Tuple=2_0 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[Any]=1 , SCREAMING_SNAKE_CASE :int=0 , ) -> List[Any]: '''simple docstring''' _a : int =parent _a : List[Any] =batch_size _a : Tuple =seq_length _a : List[Any] =is_training _a : Dict =use_labels _a : List[Any] =vocab_size _a : Optional[int] =hidden_size _a : Tuple =num_hidden_layers _a : Union[str, Any] =num_attention_heads _a : Optional[Any] =intermediate_size _a : List[str] =hidden_dropout_prob _a : List[Any] =attention_probs_dropout_prob _a : Any =max_position_embeddings _a : Tuple =eos_token_id _a : Union[str, Any] =pad_token_id _a : Union[str, Any] =bos_token_id def __UpperCAmelCase ( self :Union[str, Any] ) -> Any: '''simple docstring''' _a : List[str] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _a : Optional[int] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _a : Tuple =tf.concat([input_ids, eos_tensor] , axis=1 ) _a : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Dict =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _a : Union[str, Any] =prepare_mbart_inputs_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return config, inputs_dict def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :str ) -> Any: '''simple docstring''' _a : List[str] =TFMBartModel(config=SCREAMING_SNAKE_CASE ).get_decoder() _a : int =inputs_dict["""input_ids"""] _a : Dict =input_ids[:1, :] _a : Dict =inputs_dict["""attention_mask"""][:1, :] _a : Tuple =inputs_dict["""head_mask"""] _a : Optional[int] =1 # first forward pass _a : List[Any] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , 
head_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE ) _a , _a : Optional[int] =outputs.to_tuple() _a : Optional[int] =past_key_values[1] def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : int ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[str]=None ,_UpperCAmelCase : Optional[int]=None ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : str=None ,) -> Tuple: if attention_mask is None: _a : int =tf.cast(tf.math.not_equal(_UpperCAmelCase ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: _a : Optional[Any] =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: _a : str =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _a : Optional[Any] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _a : Optional[Any] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : Dict = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () __UpperCamelCase : Union[str, Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else () __UpperCamelCase : Dict = ( { "conversational": TFMBartForConditionalGeneration, "feature-extraction": TFMBartModel, "summarization": TFMBartForConditionalGeneration, "text2text-generation": TFMBartForConditionalGeneration, "translation": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) __UpperCamelCase : List[Any] = True __UpperCamelCase : Union[str, Any] = False __UpperCamelCase : Dict = False def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :List[str] ) -> List[Any]: '''simple docstring''' if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def __UpperCAmelCase ( self :Optional[int] ) -> List[str]: '''simple docstring''' _a : str =TFMBartModelTester(self ) _a : Optional[int] =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Dict ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self :Union[str, Any] ) -> Tuple: '''simple docstring''' _a : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE ) @require_sentencepiece @require_tokenizers @require_tf class A__ ( unittest.TestCase ): __UpperCamelCase : List[Any] = [ " UN Chief Says There Is No Military Solution in Syria", ] __UpperCamelCase : Tuple = [ "Şeful ONU declară că nu există o soluţie militară în Siria", ] __UpperCamelCase : Any = "facebook/mbart-large-en-ro" @cached_property def __UpperCAmelCase ( self :Optional[Any] ) -> Any: '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __UpperCAmelCase ( self :int ) -> List[str]: '''simple docstring''' _a : Union[str, Any] =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __UpperCAmelCase ( self :Dict , **SCREAMING_SNAKE_CASE :Any ) -> List[str]: '''simple docstring''' _a : Optional[Any] =self.translate_src_text(**SCREAMING_SNAKE_CASE ) self.assertListEqual(self.expected_text , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[Any] , **SCREAMING_SNAKE_CASE :List[str] ) -> Dict: '''simple docstring''' _a : Optional[int] =self.tokenizer(self.src_text , **SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) _a : List[Any] =self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) _a : Union[str, Any] =self.tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE ) return generated_words @slow def __UpperCAmelCase ( self :List[Any] ) -> Dict: '''simple docstring''' self._assert_generated_batch_equal_expected()
694
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class A__ ( unittest.TestCase ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=1_8 , SCREAMING_SNAKE_CASE :Any=3_0 , SCREAMING_SNAKE_CASE :List[str]=4_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , ) -> Tuple: '''simple docstring''' _a : int =size if size is not None else {"""height""": 1_8, """width""": 1_8} _a : int =parent _a : Optional[int] =batch_size _a : List[str] =num_channels _a : Optional[Any] =image_size _a : int =min_resolution _a : str =max_resolution _a : str =do_resize _a : Tuple =size _a : Tuple =do_normalize def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804], [-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self :List[Any] ) -> List[Any]: '''simple docstring''' _a : Any =ImageGPTImageProcessingTester(self ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self :Dict ) -> Any: '''simple docstring''' _a : Optional[Any] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) ) def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]: '''simple docstring''' _a : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} ) _a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} ) def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) _a : Dict =json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) ) else: self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' _a : List[Any] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : Any 
=os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" ) image_processor_first.to_json_file(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict() _a : Tuple =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> str: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict() _a : Union[str, Any] =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: _a : Any =load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" ) _a : Dict =Image.open(dataset[4]["""file"""] ) _a : Optional[int] =Image.open(dataset[5]["""file"""] ) _a : Optional[Any] =[imagea, imagea] return images @require_vision @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : Optional[Any] =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) _a : int =prepare_images() # test non-batched _a : Dict =image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) ) _a : Optional[int] =[3_0_6, 1_9_1, 1_9_1] self.assertEqual(encoding.input_ids[0, :3].tolist() , SCREAMING_SNAKE_CASE ) # test batched _a : Dict =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) ) _a : Any =[3_0_3, 1_3, 1_3] self.assertEqual(encoding.input_ids[1, -3:].tolist() , SCREAMING_SNAKE_CASE )
694
1
'''simple docstring''' from cva import destroyAllWindows, imread, imshow, waitKey def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any] ) -> Dict: # get the image's height and width _a , _a : Dict =img.shape[0], img.shape[1] # converting each pixel's color to its negative for i in range(_UpperCAmelCase ): for j in range(_UpperCAmelCase ): _a : Any =[255, 255, 255] - img[i][j] return img if __name__ == "__main__": # read original image A__: Dict = imread('''image_data/lena.jpg''', 1) # convert to its negative A__: Union[str, Any] = convert_to_negative(img) # show result image imshow('''negative of original image''', img) waitKey(0) destroyAllWindows()
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[int] ,_UpperCAmelCase : int ) -> bool: _a : Optional[int] =len(_UpperCAmelCase ) _a : Tuple =[[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # a sum of zero can always be formed by taking no elements, hence True for i in range(arr_len + 1 ): _a : Any =True # a nonzero sum cannot be formed from the empty set, hence False for i in range(1 ,required_sum + 1 ): _a : int =False for i in range(1 ,arr_len + 1 ): for j in range(1 ,required_sum + 1 ): if arr[i - 1] > j: _a : Optional[Any] =subset[i - 1][j] if arr[i - 1] <= j: _a : Union[str, Any] =subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
694
1
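The per-pixel negation loop in the row above vectorizes to a single NumPy expression, assuming the uint8 images that cv2.imread returns:

import numpy as np

def convert_to_negative(img: np.ndarray) -> np.ndarray:
    # 255 - v for every channel value; NumPy broadcasts this over the whole array.
    return 255 - img

print(convert_to_negative(np.array([[0, 128, 255]], dtype=np.uint8)))  # [[255 127   0]]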
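And a space-optimized sketch of the subset-sum DP from the same row: one boolean row, updated right-to-left so each element is used at most once, replaces the full (n+1) x (sum+1) table:

def is_sum_subset(arr: list, required_sum: int) -> bool:
    # reachable[s] is True iff some subset of the processed elements sums to s.
    reachable = [True] + [False] * required_sum
    for value in arr:
        for s in range(required_sum, value - 1, -1):
            reachable[s] = reachable[s] or reachable[s - value]
    return reachable[required_sum]

print(is_sum_subset([2, 4, 6, 8], 5))   # False (all elements are even)
print(is_sum_subset([2, 4, 6, 8], 14))  # True (2 + 4 + 8)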
'''simple docstring''' from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: _a : Optional[Any] =ArgumentParser("""Transformers CLI tool""" ,usage="""transformers-cli <command> [<args>]""" ) _a : Any =parser.add_subparsers(help="""transformers-cli command helpers""" ) # Register commands ConvertCommand.register_subcommand(_UpperCAmelCase ) DownloadCommand.register_subcommand(_UpperCAmelCase ) EnvironmentCommand.register_subcommand(_UpperCAmelCase ) RunCommand.register_subcommand(_UpperCAmelCase ) ServeCommand.register_subcommand(_UpperCAmelCase ) UserCommands.register_subcommand(_UpperCAmelCase ) AddNewModelCommand.register_subcommand(_UpperCAmelCase ) AddNewModelLikeCommand.register_subcommand(_UpperCAmelCase ) LfsCommands.register_subcommand(_UpperCAmelCase ) PTtoTFCommand.register_subcommand(_UpperCAmelCase ) # Let's go _a : str =parser.parse_args() if not hasattr(_UpperCAmelCase ,"""func""" ): parser.print_help() exit(1 ) # Run _a : Dict =args.func(_UpperCAmelCase ) service.run() if __name__ == "__main__": main()
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 4000000 ) -> int: _a : Optional[Any] =[] _a , _a : Union[str, Any] =0, 1 while b <= n: if b % 2 == 0: even_fibs.append(_UpperCAmelCase ) _a , _a : Optional[Any] =b, a + b return sum(_UpperCAmelCase ) if __name__ == "__main__": print(F"{solution() = }")
694
1
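The even-Fibonacci sum in the row above (Project Euler 2) can also skip the odd terms entirely, using the standard identity that even Fibonacci numbers satisfy E(k) = 4*E(k-1) + E(k-2) with E(1) = 2, E(2) = 8; a sketch:

def even_fib_sum(limit: int = 4_000_000) -> int:
    # Walk only the even Fibonacci numbers: 2, 8, 34, 144, ...
    total, a, b = 0, 2, 8
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total

print(even_fib_sum(100))  # 44, since 2 + 8 + 34 = 44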
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : List[Any] = CodeGenTokenizer __UpperCamelCase : Union[str, Any] = CodeGenTokenizerFast __UpperCamelCase : Optional[int] = True __UpperCamelCase : List[Any] = {"add_prefix_space": True} __UpperCamelCase : int = False def __UpperCAmelCase ( self :List[Any] ) -> List[str]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _a : Union[str, Any] =[ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", """<|endoftext|>""", ] _a : List[str] =dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) _a : Dict =["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] _a : Any ={"""unk_token""": """<unk>"""} _a : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) _a : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE ) ) def __UpperCAmelCase ( self :Optional[int] , **SCREAMING_SNAKE_CASE :Any ) -> List[str]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Union[str, Any] , **SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any] ) -> Dict: '''simple docstring''' _a : str ="""lower newer""" _a : Optional[Any] ="""lower newer""" return input_text, output_text def __UpperCAmelCase ( self :Any ) -> Optional[Any]: '''simple docstring''' _a : List[Any] =CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _a : Any ="""lower newer""" _a : Any =["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] _a : Optional[int] =tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Optional[Any] =tokens + [tokenizer.unk_token] _a : Any =[1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' if not self.test_rust_tokenizer: return _a : Tuple =self.get_tokenizer() _a : Tuple =self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE ) _a : int ="""lower newer""" # Testing tokenization _a : List[str] =tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE ) _a : Optional[int] 
=rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Testing conversion to ids without special tokens _a : Dict =tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Testing conversion to ids with special tokens _a : str =self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE ) _a : Dict =tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE ) _a : Optional[int] =rust_tokenizer.encode(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Testing the unknown token _a : Optional[Any] =tokens + [rust_tokenizer.unk_token] _a : Any =[1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Union[str, Any] , *SCREAMING_SNAKE_CASE :List[str] , **SCREAMING_SNAKE_CASE :int ) -> Optional[int]: '''simple docstring''' # It's very difficult to mix/test pretokenization with byte-level # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :int=1_5 ) -> Union[str, Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): _a : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # Simple input _a : Union[str, Any] ="""This is a simple input""" _a : Optional[int] =["""This is a simple input 1""", """This is a simple input 2"""] _a : Tuple =("""This is a simple input""", """This is a pair""") _a : Dict =[ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="""max_length""" ) # Simple input self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="""max_length""" ) # Simple input self.assertRaises( SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="""max_length""" , ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="""max_length""" ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="""max_length""" ) # Pair input self.assertRaises( SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="""max_length""" , ) def __UpperCAmelCase ( self :Union[str, Any] ) -> Tuple: '''simple docstring''' _a : Union[str, Any] =CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" ) # Simple input _a : int ="""This is a simple input""" _a : List[Any] =["""This is a simple input looooooooong""", """This is a simple input"""] _a : Optional[int] =("""This is a simple input""", """This is a pair""") _a : Optional[int] =[ ("""This is a 
simple input loooooong""", """This is a simple input"""), ("""This is a simple pair loooooong""", """This is a simple pair"""), ] _a : Dict =tokenizer.pad_token_id _a : Optional[Any] =tokenizer(SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=3_0 , return_tensors="""np""" ) _a : int =tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="""np""" ) _a : str =tokenizer(*SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=6_0 , return_tensors="""np""" ) _a : Dict =tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="""np""" ) # s # test single string max_length padding self.assertEqual(out_s["""input_ids"""].shape[-1] , 3_0 ) self.assertTrue(pad_token_id in out_s["""input_ids"""] ) self.assertTrue(0 in out_s["""attention_mask"""] ) # s2 # test automatic padding self.assertEqual(out_sa["""input_ids"""].shape[-1] , 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] ) self.assertFalse(0 in out_sa["""attention_mask"""][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] ) self.assertTrue(0 in out_sa["""attention_mask"""][1] ) # p # test single pair max_length padding self.assertEqual(out_p["""input_ids"""].shape[-1] , 6_0 ) self.assertTrue(pad_token_id in out_p["""input_ids"""] ) self.assertTrue(0 in out_p["""attention_mask"""] ) # p2 # test automatic padding pair self.assertEqual(out_pa["""input_ids"""].shape[-1] , 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] ) self.assertFalse(0 in out_pa["""attention_mask"""][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] ) self.assertTrue(0 in out_pa["""attention_mask"""][1] ) def __UpperCAmelCase ( self :List[str] ) -> Union[str, Any]: '''simple docstring''' _a : List[Any] ="""$$$""" _a : Optional[int] =CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE ) _a : Tuple ="""This is a simple input""" _a : List[Any] =["""This is a simple input 1""", """This is a simple input 2"""] _a : Dict =tokenizer.bos_token_id _a : Any =tokenizer(SCREAMING_SNAKE_CASE ) _a : Tuple =tokenizer(SCREAMING_SNAKE_CASE ) self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _a : Tuple =tokenizer.decode(out_s.input_ids ) _a : int =tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def __UpperCAmelCase ( self :str ) -> int: '''simple docstring''' _a : int =CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" ) _a : Tuple ="""\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#""" _a : List[str] ="""\nif len_a > len_b: result = a\nelse: result = b""" _a : int =tokenizer.encode(SCREAMING_SNAKE_CASE ) _a : str =["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""] _a : str =tokenizer.decode(SCREAMING_SNAKE_CASE , truncate_before_pattern=SCREAMING_SNAKE_CASE ) self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :List[Any] ) -> Any: '''simple docstring''' pass
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int: if n == 1 or not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): return 0 elif n == 2: return 1 else: _a : Dict =[0, 1] for i in range(2 ,n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int: _a : Union[str, Any] =0 _a : Optional[Any] =2 while digits < n: index += 1 _a : Optional[int] =len(str(fibonacci(_UpperCAmelCase ) ) ) return index def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 1000 ) -> int: return fibonacci_digits_index(_UpperCAmelCase ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
694
1
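The digit-counting search in the row above (Project Euler 25) grows a full Fibonacci sequence per probe; Binet's formula gives the index directly, since F(i) has floor(i*log10(phi) - log10(sqrt(5))) + 1 digits (a sketch, assumed equivalent for n > 1):

from math import ceil, log10, sqrt

def fibonacci_digits_index(n: int) -> int:
    # Smallest i with i*log10(phi) - log10(sqrt(5)) >= n - 1.
    phi = (1 + sqrt(5)) / 2
    return ceil((n - 1 + log10(sqrt(5))) / log10(phi))

print(fibonacci_digits_index(3))     # 12, since F(12) = 144 is the first 3-digit term
print(fibonacci_digits_index(1000))  # 4782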
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging A__: List[str] = logging.get_logger(__name__) A__: Optional[Any] = { '''huggingface/time-series-transformer-tourism-monthly''': ( '''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json''' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class A__ ( UpperCAmelCase__ ): __UpperCamelCase : List[Any] = "time_series_transformer" __UpperCamelCase : str = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :str = "student_t" , SCREAMING_SNAKE_CASE :str = "nll" , SCREAMING_SNAKE_CASE :int = 1 , SCREAMING_SNAKE_CASE :List[int] = [1, 2, 3, 4, 5, 6, 7] , SCREAMING_SNAKE_CASE :Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE :int = 0 , SCREAMING_SNAKE_CASE :int = 0 , SCREAMING_SNAKE_CASE :int = 0 , SCREAMING_SNAKE_CASE :int = 0 , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :int = 3_2 , SCREAMING_SNAKE_CASE :int = 3_2 , SCREAMING_SNAKE_CASE :int = 2 , SCREAMING_SNAKE_CASE :int = 2 , SCREAMING_SNAKE_CASE :int = 2 , SCREAMING_SNAKE_CASE :int = 2 , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :str = "gelu" , SCREAMING_SNAKE_CASE :int = 6_4 , SCREAMING_SNAKE_CASE :float = 0.1 , SCREAMING_SNAKE_CASE :float = 0.1 , SCREAMING_SNAKE_CASE :float = 0.1 , SCREAMING_SNAKE_CASE :float = 0.1 , SCREAMING_SNAKE_CASE :float = 0.1 , SCREAMING_SNAKE_CASE :int = 1_0_0 , SCREAMING_SNAKE_CASE :float = 0.02 , SCREAMING_SNAKE_CASE :List[Any]=True , **SCREAMING_SNAKE_CASE :List[Any] , ) -> Optional[int]: '''simple docstring''' # time series specific configuration _a : str =prediction_length _a : Union[str, Any] =context_length or prediction_length _a : List[str] =distribution_output _a : Tuple =loss _a : List[Any] =input_size _a : Optional[Any] =num_time_features _a : Union[str, Any] =lags_sequence _a : Optional[int] =scaling _a : List[Any] =num_dynamic_real_features _a : List[str] =num_static_real_features _a : str =num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) _a : Any =cardinality else: _a : List[str] =[0] if embedding_dimension and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) _a : Union[str, Any] =embedding_dimension else: _a : Optional[int] =[min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] _a : List[str] =num_parallel_samples # Transformer architecture configuration _a : Any =input_size * len(SCREAMING_SNAKE_CASE ) + self._number_of_features _a : Union[str, Any] =d_model _a : Union[str, Any] =encoder_attention_heads _a : Dict =decoder_attention_heads _a : Dict =encoder_ffn_dim _a : Optional[Any] =decoder_ffn_dim _a : List[str] =encoder_layers _a : Union[str, Any] =decoder_layers _a : Any =dropout _a : Optional[int] =attention_dropout _a : 
Optional[int] =activation_dropout _a : Union[str, Any] =encoder_layerdrop _a : Optional[int] =decoder_layerdrop _a : Tuple =activation_function _a : Optional[Any] =init_std _a : List[Any] =use_cache super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @property def __UpperCAmelCase ( self :List[str] ) -> int: '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
694
'''simple docstring''' import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : int ) -> str: # Initialise PyTorch model _a : List[str] =RemBertConfig.from_json_file(_UpperCAmelCase ) print("""Building PyTorch model from configuration: {}""".format(str(_UpperCAmelCase ) ) ) _a : Dict =RemBertModel(_UpperCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_rembert(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) # Save pytorch-model print("""Save PyTorch model to {}""".format(_UpperCAmelCase ) ) torch.save(model.state_dict() ,_UpperCAmelCase ) if __name__ == "__main__": A__: Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--rembert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained RemBERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) A__: Tuple = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
694
1
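# For reference, a hypothetical direct call of the conversion function above;
# the paths are placeholders, not real files.
convert_rembert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="rembert/bert_model.ckpt",
    rembert_config_file="rembert/rembert_config.json",
    pytorch_dump_path="rembert-pytorch/pytorch_model.bin",
)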
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
694
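# Quick round-trip check of the cipher above; non-letter symbols pass through
# unchanged and do not advance the key index.
ciphertext = encrypt_message("LEMON", "Attack at dawn!")
assert decrypt_message("LEMON", ciphertext) == "Attack at dawn!"
print(ciphertext)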
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A__: Optional[int] = logging.get_logger(__name__) A__: Union[str, Any] = '''▁''' A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''} A__: Optional[int] = { '''vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''', }, '''monolingual_vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''', }, } A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024} class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = VOCAB_FILES_NAMES __UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it _a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token _a : int ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) _a : Dict =vocab_file _a : int =monolingual_vocab_file _a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _a : List[Any] ={} _a : List[str] =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[Any] =cnt cnt += 1 with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _a : int =line.strip().split()[0] _a : str =len(self.fairseq_tokens_to_ids ) if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[int] =len(self.fairseq_tokens_to_ids ) _a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self :int ) -> List[Any]: '''simple docstring''' _a : Optional[int] =self.__dict__.copy() _a : Optional[Any] =None _a : str =self.sp_model.serialized_model_proto() return state def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str: '''simple docstring''' _a : List[str] =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple ={} _a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def 
__UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _a : Optional[int] =[self.cls_token_id] _a : int =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _a : List[str] =[self.sep_token_id] _a : int =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __UpperCAmelCase ( self :Optional[int] ) -> int: '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def __UpperCAmelCase ( self :int ) -> Union[str, Any]: '''simple docstring''' _a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict: '''simple docstring''' return self.fairseq_ids_to_tokens[index] def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip() return out_string def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _a : int =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _a : Any =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi: _a : Optional[Any] =self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE ) 
elif not os.path.isfile(self.monolingual_vocab_file ): with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" ) return out_vocab_file, out_monolingual_vocab_file
694
1
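# A usage sketch for the tokenizer above with an example Vietnamese sentence;
# requires the sentencepiece package.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))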
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data

from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
694
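# A single-process sketch of the split_dataset_by_node semantics the script above
# tests: with 7 examples and world_size 2, rank 0 gets one extra example.
from datasets import Dataset
from datasets.distributed import split_dataset_by_node

ds = Dataset.from_dict({"i": list(range(7))})
for rank in range(2):
    shard = split_dataset_by_node(ds, rank=rank, world_size=2)
    print(rank, len(shard))  # 0 4, then 1 3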
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
694
1
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin A__: Any = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class A__ : def __init__( self :int , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :List[str]=1_6 , SCREAMING_SNAKE_CASE :Optional[Any]=1_3 , SCREAMING_SNAKE_CASE :int=7 , SCREAMING_SNAKE_CASE :List[str]=1_4 , SCREAMING_SNAKE_CASE :Optional[Any]=1_0 , SCREAMING_SNAKE_CASE :Optional[int]=1_9 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :Dict=4 , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Union[str, Any]=1_6 , SCREAMING_SNAKE_CASE :Any=2 , SCREAMING_SNAKE_CASE :Tuple=4 , SCREAMING_SNAKE_CASE :int=4 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Optional[Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :Dict=[1, 2, 3, 4, 5] , SCREAMING_SNAKE_CASE :Tuple=2_5 , SCREAMING_SNAKE_CASE :str=5 , ) -> Tuple: '''simple docstring''' _a : List[str] =d_model _a : Optional[int] =parent _a : Optional[int] =batch_size _a : Tuple =prediction_length _a : str =context_length _a : int =cardinality _a : List[Any] =num_time_features _a : Optional[Any] =lags_sequence _a : str =embedding_dimension _a : Optional[Any] =is_training _a : Tuple =hidden_size _a : List[str] =num_hidden_layers _a : Dict =num_attention_heads _a : Dict =intermediate_size _a : Tuple =hidden_act _a : List[str] =hidden_dropout_prob _a : int =attention_probs_dropout_prob _a : List[Any] =context_length _a : str =prediction_length + label_length _a : Union[str, Any] =label_length _a : str =moving_average _a : Optional[int] =autocorrelation_factor def __UpperCAmelCase ( self :List[str] ) -> Optional[Any]: '''simple docstring''' return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Optional[int]: '''simple docstring''' _a : Optional[int] =config.context_length + max(config.lags_sequence ) _a : Optional[Any] =ids_tensor([self.batch_size, 1] , config.cardinality[0] ) _a : Optional[int] =floats_tensor([self.batch_size, _past_length, config.num_time_features] ) _a : List[str] =floats_tensor([self.batch_size, _past_length] ) _a : List[Any] =floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs _a : List[str] 
=floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) _a : int =floats_tensor([self.batch_size, config.prediction_length] ) _a : List[Any] ={ """past_values""": past_values, """static_categorical_features""": static_categorical_features, """past_time_features""": past_time_features, """past_observed_mask""": past_observed_mask, """future_time_features""": future_time_features, """future_values""": future_values, } return inputs_dict def __UpperCAmelCase ( self :List[str] ) -> Optional[int]: '''simple docstring''' _a : str =self.get_config() _a : Dict =self.prepare_autoformer_inputs_dict(SCREAMING_SNAKE_CASE ) return config, inputs_dict def __UpperCAmelCase ( self :Optional[Any] ) -> List[Any]: '''simple docstring''' _a , _a : Tuple =self.prepare_config_and_inputs() return config, inputs_dict def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Any ) -> Union[str, Any]: '''simple docstring''' _a : Any =AutoformerModel(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval() _a : Optional[Any] =model(**SCREAMING_SNAKE_CASE ) _a : int =outputs.encoder_last_hidden_state _a : List[Any] =outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _a : Any =model.get_encoder() encoder.save_pretrained(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =AutoformerEncoder.from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) _a , _a , _a , _a , _a : Optional[int] =model.create_network_inputs(**SCREAMING_SNAKE_CASE ) _a , _a : str =model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) _a : Optional[int] =torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) _a : Optional[Any] =encoder(inputs_embeds=SCREAMING_SNAKE_CASE )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) _a : Union[str, Any] =( torch.mean(transformer_inputs[:, : config.context_length, ...] 
, dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) _a : int =torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) _a : Union[str, Any] =torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) _a : List[str] =torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: _a : Tuple =model.get_decoder() decoder.save_pretrained(SCREAMING_SNAKE_CASE ) _a : List[Any] =AutoformerDecoder.from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) _a : Dict =decoder( trend=SCREAMING_SNAKE_CASE , inputs_embeds=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class A__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : Optional[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () __UpperCamelCase : Union[str, Any] = (AutoformerForPrediction,) if is_torch_available() else () __UpperCamelCase : Optional[int] = {"feature-extraction": AutoformerModel} if is_torch_available() else {} __UpperCamelCase : Optional[int] = False __UpperCamelCase : Dict = False __UpperCamelCase : Optional[int] = False __UpperCamelCase : str = False __UpperCamelCase : Union[str, Any] = False __UpperCamelCase : int = False def __UpperCAmelCase ( self :int ) -> str: '''simple docstring''' _a : Dict =AutoformerModelTester(self ) _a : Dict =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :List[str] ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self :Any ) -> Dict: '''simple docstring''' _a , _a : Optional[int] =self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _a : Dict =model_class(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(SCREAMING_SNAKE_CASE ) _a , _a : Optional[Any] =model_class.from_pretrained(SCREAMING_SNAKE_CASE , output_loading_info=SCREAMING_SNAKE_CASE ) self.assertEqual(info["""missing_keys"""] , [] ) def __UpperCAmelCase ( self :Any ) -> Union[str, Any]: '''simple docstring''' _a : Dict =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""Model has no tokens embeddings""" ) def __UpperCAmelCase ( self :List[str] ) -> Optional[int]: '''simple docstring''' pass def __UpperCAmelCase ( self :Dict ) -> List[Any]: '''simple docstring''' _a : Tuple =inspect.signature(getattr(SCREAMING_SNAKE_CASE , """forward""" ) ) # The main input is the name of the argument after `self` _a : List[Any] =list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int ) -> Any: '''simple docstring''' _a , _a : Dict =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : int =model_class(SCREAMING_SNAKE_CASE ) _a : Optional[int] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is 
deterministic _a : int =[*signature.parameters.keys()] _a : Optional[Any] =[ """past_values""", """past_time_features""", """past_observed_mask""", """static_categorical_features""", """static_real_features""", """future_values""", """future_time_features""", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("""future_observed_mask""" ) expected_arg_names.extend( [ """decoder_attention_mask""", """head_mask""", """decoder_head_mask""", """cross_attn_head_mask""", """encoder_outputs""", """past_key_values""", """output_hidden_states""", """output_attentions""", """use_cache""", """return_dict""", ] ) self.assertListEqual(arg_names[: len(SCREAMING_SNAKE_CASE )] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Any ) -> str: '''simple docstring''' _a , _a : Tuple =self.model_tester.prepare_config_and_inputs_for_common() _a : Union[str, Any] =True _a : Optional[Any] =getattr(self.model_tester , """seq_length""" , SCREAMING_SNAKE_CASE ) _a : Tuple =getattr(self.model_tester , """decoder_seq_length""" , SCREAMING_SNAKE_CASE ) _a : Optional[Any] =getattr(self.model_tester , """encoder_seq_length""" , SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =getattr(self.model_tester , """d_model""" , SCREAMING_SNAKE_CASE ) _a : Tuple =getattr(self.model_tester , """num_attention_heads""" , SCREAMING_SNAKE_CASE ) _a : List[Any] =d_model // num_attention_heads for model_class in self.all_model_classes: _a : int =True _a : Optional[Any] =False _a : Any =True _a : Union[str, Any] =model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): _a : Tuple =model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) _a : List[str] =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _a : Tuple =True _a : Tuple =model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): _a : Optional[Any] =model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) _a : Dict =outputs.encoder_attentions self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) _a : Tuple =len(SCREAMING_SNAKE_CASE ) _a : Optional[Any] =7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # decoder attentions _a : Optional[int] =outputs.decoder_attentions self.assertIsInstance(SCREAMING_SNAKE_CASE , (list, tuple) ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions _a : Any =outputs.cross_attentions self.assertIsInstance(SCREAMING_SNAKE_CASE , (list, tuple) ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check 
attention is always last and order is fine _a : int =True _a : Tuple =True _a : Tuple =model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): _a : List[str] =model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) self.assertEqual(out_len + 2 , len(SCREAMING_SNAKE_CASE ) ) _a : Any =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' super().test_retain_grad_hidden_states_attentions() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict="train-batch.pt" ) -> Optional[Any]: _a : Union[str, Any] =hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" ,filename=_UpperCAmelCase ,repo_type="""dataset""" ) _a : Dict =torch.load(_UpperCAmelCase ,map_location=_UpperCAmelCase ) return batch @require_torch @slow class A__ ( unittest.TestCase ): def __UpperCAmelCase ( self :Any ) -> Optional[int]: '''simple docstring''' _a : Optional[Any] =AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(SCREAMING_SNAKE_CASE ) _a : int =prepare_batch() with torch.no_grad(): _a : Any =model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0] _a : List[Any] =torch.Size( (6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) _a : Tuple =torch.tensor( [[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(output[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) ) def __UpperCAmelCase ( self :str ) -> Union[str, Any]: '''simple docstring''' _a : List[str] =AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(SCREAMING_SNAKE_CASE ) _a : List[str] =prepare_batch("""val-batch.pt""" ) with torch.no_grad(): _a : Tuple =model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state _a : str =torch.Size((6_4, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) _a : List[str] =torch.tensor( [[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(output[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) ) def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _a : List[str] =AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(SCREAMING_SNAKE_CASE ) _a : List[str] =prepare_batch("""val-batch.pt""" ) with torch.no_grad(): _a : int =model.generate( static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , 
past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , ) _a : Union[str, Any] =torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , SCREAMING_SNAKE_CASE ) _a : List[Any] =torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=SCREAMING_SNAKE_CASE ) _a : Optional[int] =outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , SCREAMING_SNAKE_CASE , rtol=1e-1 ) )
694
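# A condensed sketch of the prediction path the slow tests above exercise;
# it downloads the model and the test batch from the Hub.
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoformerForPrediction

model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename="val-batch.pt", repo_type="dataset")
batch = torch.load(file, map_location="cpu")
with torch.no_grad():
    outputs = model.generate(
        past_values=batch["past_values"],
        past_time_features=batch["past_time_features"],
        past_observed_mask=batch["past_observed_mask"],
        static_categorical_features=batch["static_categorical_features"],
        future_time_features=batch["future_time_features"],
    )
print(outputs.sequences.mean(dim=1).shape)  # point forecast: (64, prediction_length)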
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a "unit" matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude

    # Subtract the first row from every other row to cancel the leading term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If the first term is 0, the row is already in the form we want, so preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)

    # Create the next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    # Solve a system of n linear equations given as n lists of length n + 1
    # (coefficients followed by the constant term).
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]

    data_set = equations.copy()
    # Move one equation without any zero coefficients to the front
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)

    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]

    # Back-substitute from the last (shortest) row upwards
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)

    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
694
1
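# An optional cross-check of the solver above against numpy; numpy is an extra
# dependency used only for this sanity check.
import numpy as np

eq = [
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]
a = np.array([row[:-1] for row in eq], dtype=float)
b = np.array([row[-1] for row in eq], dtype=float)
print(np.linalg.solve(a, b))   # [-1.  0.  1.  2.  3.]
print(solve_simultaneous(eq))  # should agree to 5 decimal places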
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
694
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__: Dict = logging.get_logger(__name__) A__: Optional[int] = { '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = "markuplm" def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any: '''simple docstring''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) _a : Any =vocab_size _a : List[str] =hidden_size _a : List[str] =num_hidden_layers _a : Tuple =num_attention_heads _a : Union[str, Any] =hidden_act _a : Tuple =intermediate_size _a : Optional[Any] =hidden_dropout_prob _a : int =attention_probs_dropout_prob _a : Any =max_position_embeddings _a : List[Any] =type_vocab_size _a : List[Any] =initializer_range _a : List[Any] =layer_norm_eps _a : Optional[int] =position_embedding_type _a : List[Any] =use_cache _a : List[str] =classifier_dropout # additional properties _a : int =max_depth _a : Union[str, Any] =max_xpath_tag_unit_embeddings _a : str =max_xpath_subs_unit_embeddings _a : int =tag_pad_id _a : List[Any] =subs_pad_id _a : str =xpath_unit_hidden_size
694
1
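# A usage sketch: the defaults of the config above correspond to the base-size
# architecture.
from transformers import MarkupLMConfig, MarkupLMModel

config = MarkupLMConfig()
model = MarkupLMModel(config)
print(config.hidden_size, config.max_depth, config.xpath_unit_hidden_size)  # 768 50 32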
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[str] ) -> int: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A__ ( nn.Module ): def __init__( self :Dict , SCREAMING_SNAKE_CASE :nn.Module , SCREAMING_SNAKE_CASE :int ) -> Optional[Any]: '''simple docstring''' super().__init__() _a : Dict =module _a : Any =nn.Sequential( nn.Linear(module.in_features , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE ) , nn.Linear(SCREAMING_SNAKE_CASE , module.out_features , bias=SCREAMING_SNAKE_CASE ) , ) _a : List[Any] =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=SCREAMING_SNAKE_CASE ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Dict , *SCREAMING_SNAKE_CASE :Tuple , **SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[int]: '''simple docstring''' return self.module(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) + self.adapter(SCREAMING_SNAKE_CASE ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module __UpperCamelCase : Optional[int] = "bigscience/bloom-1b7" # Constant values __UpperCamelCase : Any = 2.109_659_552_692_574 __UpperCamelCase : str = "Hello my name is" __UpperCamelCase : List[str] = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I" ) EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" ) EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" ) __UpperCamelCase : Dict = 10 def __UpperCAmelCase ( self :Any ) -> List[Any]: '''simple docstring''' # Models and tokenizer _a : str =AutoTokenizer.from_pretrained(self.model_name ) class A__ ( UpperCAmelCase__ ): def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' super().setUp() # Models and tokenizer _a : List[str] =AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="""auto""" ) _a : List[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map="""auto""" ) def __UpperCAmelCase ( self :Tuple ) -> Dict: '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self :List[Any] ) -> List[Any]: '''simple docstring''' _a : List[Any] =self.model_abit.config self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """quantization_config""" ) ) _a : Dict =config.to_dict() _a : Union[str, Any] =config.to_diff_dict() _a : str =config.to_json_string() def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' from bitsandbytes.nn import Paramsabit _a : Dict =self.model_fpaa.get_memory_footprint() _a : Dict =self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) _a : int =get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]: '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(SCREAMING_SNAKE_CASE , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def __UpperCAmelCase ( self :str ) -> Tuple: '''simple docstring''' _a : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ) _a : List[str] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=1_0 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS ) def __UpperCAmelCase ( self :Tuple ) -> str: '''simple docstring''' _a : Union[str, Any] =BitsAndBytesConfig() _a : Union[str, Any] =True _a : List[Any] =AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=SCREAMING_SNAKE_CASE , device_map="""auto""" ) _a : Any =self.tokenizer(self.input_text , return_tensors="""pt""" ) _a : List[Any] =model_abit_from_config.generate( input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=1_0 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS ) def __UpperCAmelCase ( self :Optional[Any] ) -> List[str]: '''simple docstring''' with self.assertRaises(SCREAMING_SNAKE_CASE ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :List[Any] ) -> Optional[Any]: '''simple docstring''' _a : Dict =BitsAndBytesConfig() with self.assertRaises(SCREAMING_SNAKE_CASE ): _a : Union[str, Any] =AutoModelForCausalLM.from_pretrained( self.model_name , 
quantization_config=SCREAMING_SNAKE_CASE , load_in_abit=SCREAMING_SNAKE_CASE , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , ) def __UpperCAmelCase ( self :Dict ) -> List[str]: '''simple docstring''' with self.assertRaises(SCREAMING_SNAKE_CASE ): # Tries with `str` self.model_abit.to("""cpu""" ) with self.assertRaises(SCREAMING_SNAKE_CASE ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(SCREAMING_SNAKE_CASE ): # Tries with a `device` self.model_abit.to(torch.device("""cuda:0""" ) ) with self.assertRaises(SCREAMING_SNAKE_CASE ): # Tries with a `device` self.model_abit.float() with self.assertRaises(SCREAMING_SNAKE_CASE ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything _a : List[Any] =self.tokenizer(self.input_text , return_tensors="""pt""" ) _a : List[str] =self.model_fpaa.to(torch.floataa ) _a : int =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=1_0 ) # Check this does not throw an error _a : Dict =self.model_fpaa.to("""cpu""" ) # Check this does not throw an error _a : List[Any] =self.model_fpaa.half() # Check this does not throw an error _a : Union[str, Any] =self.model_fpaa.float() def __UpperCAmelCase ( self :Any ) -> List[str]: '''simple docstring''' _a : Any =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=SCREAMING_SNAKE_CASE , device_map="""auto""" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): @classmethod def __UpperCAmelCase ( cls :List[Any] ) -> Any: '''simple docstring''' _a : Any ="""t5-small""" _a : Optional[int] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense _a : Union[str, Any] =AutoTokenizer.from_pretrained(cls.model_name ) _a : Union[str, Any] ="""Translate in German: Hello, my dog is cute""" def __UpperCAmelCase ( self :Optional[Any] ) -> List[str]: '''simple docstring''' gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self :Tuple ) -> Tuple: '''simple docstring''' from transformers import TaForConditionalGeneration _a : Tuple =TaForConditionalGeneration._keep_in_fpaa_modules _a : Optional[Any] =None # test with `t5-small` _a : List[Any] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map="""auto""" ) _a : Any =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) _a : Union[str, Any] =model.generate(**SCREAMING_SNAKE_CASE ) # test with `flan-t5-small` _a : Optional[Any] =TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map="""auto""" ) _a : str =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) _a : List[str] =model.generate(**SCREAMING_SNAKE_CASE ) _a : int =modules def __UpperCAmelCase ( self :Union[str, Any] ) -> str: '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` _a : Union[str, Any] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map="""auto""" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) _a : List[Any] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) _a : List[Any] 
=model.generate(**SCREAMING_SNAKE_CASE ) # test with `flan-t5-small` _a : Union[str, Any] =TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map="""auto""" ) _a : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) _a : int =model.generate(**SCREAMING_SNAKE_CASE ) class A__ ( UpperCAmelCase__ ): def __UpperCAmelCase ( self :List[Any] ) -> Tuple: '''simple docstring''' super().setUp() # model_name _a : str ="""bigscience/bloom-560m""" _a : List[Any] ="""t5-small""" # Different types of model _a : Optional[int] =AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map="""auto""" ) # Sequence classification model _a : Dict =AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map="""auto""" ) # CausalLM model _a : List[str] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map="""auto""" ) # Seq2seq model _a : List[str] =AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map="""auto""" ) def __UpperCAmelCase ( self :Any ) -> Any: '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self :Union[str, Any] ) -> str: '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class A__ ( UpperCAmelCase__ ): def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]: '''simple docstring''' super().setUp() def __UpperCAmelCase ( self :Any ) -> Any: '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self :Any ) -> List[str]: '''simple docstring''' _a : str =pipeline( """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass _a : str =self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class A__ ( UpperCAmelCase__ ): def __UpperCAmelCase ( self :List[str] ) -> int: '''simple docstring''' super().setUp() def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' _a : int =AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map="""balanced""" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model _a : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ) # Second real batch _a : List[Any] =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=1_0 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS ) class A__ ( UpperCAmelCase__ ): def __UpperCAmelCase ( self :Dict ) -> Optional[Any]: '''simple docstring''' _a : Optional[int] ="""facebook/opt-350m""" super().setUp() def 
__UpperCAmelCase ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ): return # Step 1: freeze all parameters _a : List[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): _a : Optional[int] =False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability _a : Any =param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE ) ): _a : Dict =LoRALayer(module.q_proj , rank=1_6 ) _a : Optional[int] =LoRALayer(module.k_proj , rank=1_6 ) _a : List[Any] =LoRALayer(module.v_proj , rank=1_6 ) # Step 3: dummy batch _a : Dict =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): _a : str =model.forward(**SCREAMING_SNAKE_CASE ) out.logits.norm().backward() for module in model.modules(): if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(SCREAMING_SNAKE_CASE , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : List[str] = "gpt2-xl" __UpperCamelCase : Tuple = 3.3_191_854_854_152_187
694
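# A minimal sketch of the 4-bit loading path these tests cover; it assumes a CUDA
# GPU plus the accelerate and bitsandbytes packages.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m", quantization_config=quantization_config, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0]))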
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''') def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict: hf_model.apply_weight_norm() _a : Any =checkpoint["""input_conv.weight_g"""] _a : Union[str, Any] =checkpoint["""input_conv.weight_v"""] _a : Optional[int] =checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): _a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"] _a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"] _a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): _a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"] _a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"] _a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"] _a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"] _a : Dict =checkpoint["""output_conv.1.weight_g"""] _a : str =checkpoint["""output_conv.1.weight_v"""] _a : Union[str, Any] =checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]: if config_path is not None: _a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase ) else: _a : str =SpeechTaHifiGanConfig() _a : Tuple =SpeechTaHifiGan(_UpperCAmelCase ) _a : int =torch.load(_UpperCAmelCase ) load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase ) _a : Dict =np.load(_UpperCAmelCase ) _a : Union[str, Any] =stats[0].reshape(-1 ) _a : Any =stats[1].reshape(-1 ) _a : Tuple =torch.from_numpy(_UpperCAmelCase ).float() _a : List[str] =torch.from_numpy(_UpperCAmelCase ).float() model.save_pretrained(_UpperCAmelCase ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(_UpperCAmelCase ) if __name__ == "__main__": A__: Optional[int] = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) A__: Tuple = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
694
1
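# Example invocation of the conversion script above; the paths are hypothetical
# and the script file name may differ in your checkout.
#   python convert_hifigan.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --config_path config.json \
#       --pytorch_dump_folder_path ./speecht5_hifigan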
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A__ ( UpperCAmelCase__ ): __UpperCamelCase : List[str] = (EulerDiscreteScheduler,) __UpperCamelCase : Optional[int] = 10 def __UpperCAmelCase ( self :Any , **SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Optional[int]: '''simple docstring''' _a : Union[str, Any] ={ """num_train_timesteps""": 1_1_0_0, """beta_start""": 0.0_001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**SCREAMING_SNAKE_CASE ) return config def __UpperCAmelCase ( self :List[Any] ) -> Optional[Any]: '''simple docstring''' for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> int: '''simple docstring''' for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE , beta_end=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Tuple ) -> Optional[Any]: '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[Any] ) -> Any: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :List[str] ) -> Any: '''simple docstring''' _a : Dict =self.scheduler_classes[0] _a : List[str] =self.get_scheduler_config() _a : List[str] =scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(self.num_inference_steps ) _a : List[str] =torch.manual_seed(0 ) _a : Dict =self.dummy_model() _a : Any =self.dummy_sample_deter * scheduler.init_noise_sigma _a : Dict =sample.to(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): _a : Union[str, Any] =scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Optional[int] =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Dict =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ) _a : Any =output.prev_sample _a : Dict =torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) _a : Optional[int] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 10.0_807 ) < 1e-2 assert abs(result_mean.item() - 0.0_131 ) < 1e-3 def __UpperCAmelCase ( self :Tuple ) -> Dict: '''simple docstring''' _a : Tuple =self.scheduler_classes[0] _a : Union[str, Any] =self.get_scheduler_config(prediction_type="""v_prediction""" ) _a : Tuple =scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(self.num_inference_steps ) _a : Optional[int] =torch.manual_seed(0 ) _a : Dict =self.dummy_model() _a : Union[str, Any] =self.dummy_sample_deter * scheduler.init_noise_sigma _a : List[Any] =sample.to(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): _a : Dict =scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Optional[Any] =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Tuple =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ) _a : Dict =output.prev_sample _a : Tuple =torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) _a : Optional[Any] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 0.0_002 ) < 1e-2 assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3 def 
__UpperCAmelCase ( self :List[Any] ) -> Dict: '''simple docstring''' _a : int =self.scheduler_classes[0] _a : int =self.get_scheduler_config() _a : List[str] =scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(self.num_inference_steps , device=SCREAMING_SNAKE_CASE ) _a : Any =torch.manual_seed(0 ) _a : str =self.dummy_model() _a : str =self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _a : List[str] =sample.to(SCREAMING_SNAKE_CASE ) for t in scheduler.timesteps: _a : List[str] =scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Any =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ) _a : List[Any] =output.prev_sample _a : int =torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) _a : List[Any] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 10.0_807 ) < 1e-2 assert abs(result_mean.item() - 0.0_131 ) < 1e-3 def __UpperCAmelCase ( self :str ) -> str: '''simple docstring''' _a : int =self.scheduler_classes[0] _a : Union[str, Any] =self.get_scheduler_config() _a : Union[str, Any] =scheduler_class(**SCREAMING_SNAKE_CASE , use_karras_sigmas=SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(self.num_inference_steps , device=SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =torch.manual_seed(0 ) _a : Union[str, Any] =self.dummy_model() _a : List[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _a : Dict =sample.to(SCREAMING_SNAKE_CASE ) for t in scheduler.timesteps: _a : List[Any] =scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Tuple =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : List[str] =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ) _a : Optional[Any] =output.prev_sample _a : List[str] =torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) _a : Optional[int] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1e-2 assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1e-3
694
'''simple docstring''' class A__ : def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]: '''simple docstring''' _a : List[str] =None _a : Optional[Any] =None _a : str =graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Optional[int] =len(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =None def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any: '''simple docstring''' if sources is int: _a : Tuple =[sources] if sinks is int: _a : Optional[int] =[sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return _a : Union[str, Any] =sources[0] _a : Tuple =sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: _a : Tuple =0 for i in sources: max_input_flow += sum(self.graph[i] ) _a : List[Any] =len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _a : Any =max_input_flow _a : List[str] =0 _a : List[str] =len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _a : str =max_input_flow _a : Optional[Any] =size - 1 def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int: '''simple docstring''' _a : Tuple =algorithm(self ) class A__ : def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict: '''simple docstring''' _a : List[str] =flow_network _a : List[Any] =flow_network.verticesCount _a : str =flow_network.sourceIndex _a : str =flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _a : List[Any] =flow_network.graph _a : Optional[int] =False def __UpperCAmelCase ( self :List[Any] ) -> List[str]: '''simple docstring''' if not self.executed: self._algorithm() _a : Any =True def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]: '''simple docstring''' pass class A__ ( UpperCAmelCase__ ): def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result _a : List[Any] =-1 def __UpperCAmelCase ( self :Dict ) -> Tuple: '''simple docstring''' if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class A__ ( UpperCAmelCase__ ): def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) _a : int =[[0] * self.verticies_count for i in range(self.verticies_count )] _a : Union[str, Any] =[0] * self.verticies_count _a : Optional[Any] =[0] * self.verticies_count def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' _a : int =self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += 
bandwidth # Relabel-to-front selection rule _a : Tuple =[ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _a : List[Any] =0 while i < len(SCREAMING_SNAKE_CASE ): _a : Any =vertices_list[i] _a : str =self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) _a : List[str] =0 else: i += 1 _a : Optional[int] =sum(self.preflow[self.source_index] ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' _a : List[str] =min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]: '''simple docstring''' _a : int =None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _a : Optional[Any] =self.heights[to_index] if min_height is not None: _a : Any =min_height + 1 if __name__ == "__main__": A__: str = [0] A__: Optional[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network A__: Union[str, Any] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate A__: List[str] = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
694
1
"""Configuration class for composite encoder-decoder models."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
694
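A short usage sketch for the config class above, assuming it is exported as transformers.EncoderDecoderConfig alongside BertConfig:

# Usage sketch (assumes top-level exports from transformers):
from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig()
decoder_config = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
# The classmethod flips the decoder flags before composing the two configs:
assert config.decoder.is_decoder and config.decoder.add_cross_attention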
"""Configuration constants for the doc-building notebooks."""

INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
694
1
"""Net present value of a series of periodic cash flows."""


def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """
    Discount each cash flow back to period 0 and sum the results.

    >>> net_present_value(0.1, [-100.0, 50.0, 60.0])
    -4.96
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
694
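The function above evaluates NPV = sum_i CF_i / (1 + r)**i with the first cash flow at period 0; a quick arithmetic check:

# -100 + 50 / 1.1 + 60 / 1.1**2 = -4.9587..., rounded to two digits:
assert net_present_value(0.1, [-100.0, 50.0, 60.0]) == -4.96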
"""Compute a price with tax applied."""


def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
694
1
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class A__ ( unittest.TestCase ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=1_8 , SCREAMING_SNAKE_CASE :Any=3_0 , SCREAMING_SNAKE_CASE :List[str]=4_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , ) -> Tuple: '''simple docstring''' _a : int =size if size is not None else {"""height""": 1_8, """width""": 1_8} _a : int =parent _a : Optional[int] =batch_size _a : List[str] =num_channels _a : Optional[Any] =image_size _a : int =min_resolution _a : str =max_resolution _a : str =do_resize _a : Tuple =size _a : Tuple =do_normalize def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804], [-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self :List[Any] ) -> List[Any]: '''simple docstring''' _a : Any =ImageGPTImageProcessingTester(self ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self :Dict ) -> Any: '''simple docstring''' _a : Optional[Any] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) ) def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]: '''simple docstring''' _a : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} ) _a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} ) def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) _a : Dict =json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) ) else: self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' _a : List[Any] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : Any 
=os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" ) image_processor_first.to_json_file(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict() _a : Tuple =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> str: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict() _a : Union[str, Any] =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: _a : Any =load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" ) _a : Dict =Image.open(dataset[4]["""file"""] ) _a : Optional[int] =Image.open(dataset[5]["""file"""] ) _a : Optional[Any] =[imagea, imagea] return images @require_vision @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : Optional[Any] =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) _a : int =prepare_images() # test non-batched _a : Dict =image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) ) _a : Optional[int] =[3_0_6, 1_9_1, 1_9_1] self.assertEqual(encoding.input_ids[0, :3].tolist() , SCREAMING_SNAKE_CASE ) # test batched _a : Dict =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) ) _a : Any =[3_0_3, 1_3, 1_3] self.assertEqual(encoding.input_ids[1, -3:].tolist() , SCREAMING_SNAKE_CASE )
694
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class A__ ( unittest.TestCase ): def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple: '''simple docstring''' _a : Optional[Any] =parent _a : List[str] =batch_size _a : List[str] =seq_length _a : List[Any] =is_training _a : Optional[int] =use_attention_mask _a : List[Any] =use_token_type_ids _a : List[Any] =use_labels _a : Optional[Any] =vocab_size _a : str =hidden_size _a : List[Any] =num_hidden_layers _a : List[Any] =num_attention_heads _a : Union[str, Any] =intermediate_size _a : int =hidden_act _a : List[str] =hidden_dropout_prob _a : Optional[int] =attention_probs_dropout_prob _a : Dict =max_position_embeddings _a : Any =type_vocab_size _a : str =type_sequence_label_size _a : str =initializer_range _a : List[str] =num_choices def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict: '''simple docstring''' _a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Dict =None if self.use_attention_mask: _a : Any =random_attention_mask([self.batch_size, self.seq_length] ) _a : Optional[int] =None if self.use_token_type_ids: _a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a : Union[str, Any] =RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' _a : Tuple =self.prepare_config_and_inputs() _a , _a , _a , _a : List[Any] =config_and_inputs _a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def 
__UpperCAmelCase ( self :int ) -> str: '''simple docstring''' _a : List[Any] =self.prepare_config_and_inputs() _a , _a , _a , _a : Optional[int] =config_and_inputs _a : Tuple =True _a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : Union[str, Any] = True __UpperCamelCase : Dict = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self :List[str] ) -> Optional[int]: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self ) @slow def __UpperCAmelCase ( self :str ) -> int: '''simple docstring''' for model_class_name in self.all_model_classes: _a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Dict =model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_flax class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Any ) -> str: '''simple docstring''' _a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Dict =model(SCREAMING_SNAKE_CASE )[0] _a : List[Any] =[1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. _a : Any =np.array( [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def __UpperCAmelCase ( self :int ) -> int: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. _a : str =np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
694
1
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class A__ : def __init__( self :str , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Optional[int]=2 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :Dict=False , SCREAMING_SNAKE_CASE :Tuple=1_0 , SCREAMING_SNAKE_CASE :Dict=3 , SCREAMING_SNAKE_CASE :Any=3_2 * 4 , SCREAMING_SNAKE_CASE :Union[str, Any]=3_2 * 6 , SCREAMING_SNAKE_CASE :Optional[int]=4 , SCREAMING_SNAKE_CASE :int=3_2 , ) -> str: '''simple docstring''' _a : Any =parent _a : Any =batch_size _a : Dict =is_training _a : Optional[int] =use_auxiliary_loss _a : str =num_queries _a : Tuple =num_channels _a : int =min_size _a : Any =max_size _a : List[str] =num_labels _a : int =mask_feature_size def __UpperCAmelCase ( self :int ) -> Any: '''simple docstring''' _a : Dict =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( SCREAMING_SNAKE_CASE ) _a : Optional[int] =torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE ) _a : List[str] =( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE ) > 0.5 ).float() _a : Optional[Any] =(torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE ) > 0.5).long() _a : List[Any] =self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __UpperCAmelCase ( self :int ) -> Union[str, Any]: '''simple docstring''' return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def __UpperCAmelCase ( self :Optional[Any] ) -> List[Any]: '''simple docstring''' _a , _a , _a , _a , _a : List[Any] =self.prepare_config_and_inputs() _a : List[Any] ={"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Tuple ) -> List[Any]: '''simple docstring''' _a : List[str] =output.encoder_hidden_states _a : List[str] =output.pixel_decoder_hidden_states _a : Tuple =output.transformer_decoder_hidden_states self.parent.assertTrue(len(SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE ) , config.decoder_config.decoder_layers ) def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :List[Any] , 
SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[int]=False ) -> int: '''simple docstring''' with torch.no_grad(): _a : int =MaskFormerModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : List[str] =model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE ) _a : Optional[int] =model(SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _a : List[Any] =MaskFormerForInstanceSegmentation(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() def comm_check_on_output(SCREAMING_SNAKE_CASE :Tuple ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _a : Union[str, Any] =model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE ) _a : str =model(SCREAMING_SNAKE_CASE ) comm_check_on_output(SCREAMING_SNAKE_CASE ) _a : Dict =model( pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE ) comm_check_on_output(SCREAMING_SNAKE_CASE ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class A__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __UpperCamelCase : Optional[int] = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __UpperCamelCase : Optional[int] = False __UpperCamelCase : int = False __UpperCamelCase : Dict = False __UpperCamelCase : Union[str, Any] = False def __UpperCAmelCase ( self :str ) -> Optional[int]: '''simple docstring''' _a : Optional[int] =MaskFormerModelTester(self ) _a : Dict =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :str ) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self :Dict ) -> Any: '''simple docstring''' _a , _a : Any 
=self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :str ) -> Optional[int]: '''simple docstring''' _a : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def __UpperCAmelCase ( self :str ) -> Any: '''simple docstring''' pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def __UpperCAmelCase ( self :Optional[int] ) -> List[str]: '''simple docstring''' pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def __UpperCAmelCase ( self :List[Any] ) -> int: '''simple docstring''' pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def __UpperCAmelCase ( self :Any ) -> Dict: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def __UpperCAmelCase ( self :Any ) -> Optional[int]: '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def __UpperCAmelCase ( self :int ) -> List[str]: '''simple docstring''' pass def __UpperCAmelCase ( self :Tuple ) -> List[Any]: '''simple docstring''' _a , _a : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : Dict =model_class(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a : int =[*signature.parameters.keys()] _a : str =["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) @slow def __UpperCAmelCase ( self :Union[str, Any] ) -> Tuple: '''simple docstring''' for model_name in ["facebook/maskformer-swin-small-coco"]: _a : Any =MaskFormerModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :List[Any] ) -> Tuple: '''simple docstring''' _a : Tuple =(self.model_tester.min_size,) * 2 _a : int ={ """pixel_values""": torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE ), """mask_labels""": torch.randn((2, 1_0, *size) , device=SCREAMING_SNAKE_CASE ), """class_labels""": torch.zeros(2 , 1_0 , device=SCREAMING_SNAKE_CASE ).long(), } _a : Optional[Any] =MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(SCREAMING_SNAKE_CASE ) _a : List[Any] =model(**SCREAMING_SNAKE_CASE ) self.assertTrue(outputs.loss is not None ) def __UpperCAmelCase ( self :Tuple ) -> Dict: '''simple docstring''' _a , _a : str =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :List[str] ) -> Any: '''simple docstring''' _a , _a : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : str =model_class(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) _a : List[Any] =model(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE ) self.assertTrue(outputs.attentions is not None ) def __UpperCAmelCase ( self :str ) -> str: '''simple docstring''' if not 
self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _a : Tuple =self.all_model_classes[1] _a , _a , _a , _a , _a : Dict =self.model_tester.prepare_config_and_inputs() _a : List[Any] =model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.train() _a : str =model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE ).loss loss.backward() def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]: '''simple docstring''' # only MaskFormerForInstanceSegmentation has the loss _a : str =self.all_model_classes[1] _a , _a , _a , _a , _a : Optional[int] =self.model_tester.prepare_config_and_inputs() _a : int =True _a : Union[str, Any] =True _a : Union[str, Any] =model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.train() _a : Any =model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE ) _a : str =outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _a : List[Any] =outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _a : List[str] =outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _a : Optional[Any] =outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) A__: Dict = 1E-4 def SCREAMING_SNAKE_CASE_ ( ) -> Dict: _a : Optional[int] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class A__ ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' _a : Dict =MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(SCREAMING_SNAKE_CASE ) _a : Tuple =self.default_image_processor _a : Dict =prepare_img() _a : Union[str, Any] =image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE ) _a : Optional[Any] =inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _a : Any =model(**SCREAMING_SNAKE_CASE ) _a : List[str] =torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(SCREAMING_SNAKE_CASE ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) ) _a : List[str] =torch.tensor( [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(SCREAMING_SNAKE_CASE ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) ) _a : Optional[Any] =torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(SCREAMING_SNAKE_CASE ) self.assertTrue( torch.allclose( 
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) ) def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' _a : Tuple =( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(SCREAMING_SNAKE_CASE ) .eval() ) _a : List[str] =self.default_image_processor _a : Dict =prepare_img() _a : int =image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE ) _a : Any =inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _a : List[str] =model(**SCREAMING_SNAKE_CASE ) # masks_queries_logits _a : int =outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _a : str =[ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, -1.9_269_832, -2.093_942], ] _a : Optional[Any] =torch.tensor(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) ) # class_queries_logits _a : Union[str, Any] =outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _a : Dict =torch.tensor( [ [1.6512e00, -5.2572e00, -3.3519e00], [3.6169e-02, -5.9025e00, -2.9313e00], [1.0766e-04, -7.7630e00, -5.1263e00], ] ).to(SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) ) def __UpperCAmelCase ( self :Optional[Any] ) -> Any: '''simple docstring''' _a : List[Any] =( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(SCREAMING_SNAKE_CASE ) .eval() ) _a : int =self.default_image_processor _a : str =prepare_img() _a : int =image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE ) _a : str =inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _a : Dict =model(**SCREAMING_SNAKE_CASE ) # masks_queries_logits _a : Optional[Any] =outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _a : List[str] =[[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] _a : List[str] =torch.tensor(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) ) # class_queries_logits _a : List[str] =outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _a : Any =torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) ) def 
__UpperCAmelCase ( self :str ) -> int: '''simple docstring''' _a : Union[str, Any] =( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(SCREAMING_SNAKE_CASE ) .eval() ) _a : Any =self.default_image_processor _a : Dict =image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="""pt""" , ) _a : str =inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE ) _a : int =[el.to(SCREAMING_SNAKE_CASE ) for el in inputs["""mask_labels"""]] _a : Optional[Any] =[el.to(SCREAMING_SNAKE_CASE ) for el in inputs["""class_labels"""]] with torch.no_grad(): _a : Dict =model(**SCREAMING_SNAKE_CASE ) self.assertTrue(outputs.loss is not None )
694
'''simple docstring''' import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A__: Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : str=None ) -> Union[str, Any]: return field(default_factory=lambda: default ,metadata=_UpperCAmelCase ) @dataclass class A__ : __UpperCamelCase : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __UpperCamelCase : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __UpperCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __UpperCamelCase : str = field( default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , ) __UpperCamelCase : str = field( default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __UpperCamelCase : str = field( default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , ) __UpperCamelCase : str = field( default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , ) __UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' warnings.warn( f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , SCREAMING_SNAKE_CASE , ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[str]: '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
694
1
"""MarkupLM model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
694
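Instantiating the config above with its defaults exposes the XPath-specific sizes that distinguish MarkupLM from a plain BERT-style config:

config = MarkupLMConfig()
assert config.max_depth == 50           # depth of the XPath tag/subscript sequences
assert config.xpath_unit_hidden_size == 32
assert config.tag_pad_id == 216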
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class A__ ( UpperCAmelCase__ ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Distribution , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :List[Any]=0 ) -> List[str]: '''simple docstring''' _a : int =1.0 if scale is None else scale _a : Optional[Any] =0.0 if loc is None else loc super().__init__(SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE )] ) @property def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return self.base_dist.mean * self.scale + self.loc @property def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return self.base_dist.variance * self.scale**2 @property def __UpperCAmelCase ( self :Any ) -> List[str]: '''simple docstring''' return self.variance.sqrt() class A__ ( nn.Module ): def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Callable[..., Tuple[torch.Tensor]] , **SCREAMING_SNAKE_CASE :Dict ) -> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) _a : Tuple =args_dim _a : Tuple =nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] ) _a : Dict =domain_map def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Tuple[torch.Tensor]: '''simple docstring''' _a : Tuple =[proj(SCREAMING_SNAKE_CASE ) for proj in self.proj] return self.domain_map(*SCREAMING_SNAKE_CASE ) class A__ ( nn.Module ): def __init__( self :Dict , SCREAMING_SNAKE_CASE :Tuple ) -> int: '''simple docstring''' super().__init__() _a : List[Any] =function def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[int] , *SCREAMING_SNAKE_CASE :int ) -> List[Any]: '''simple docstring''' return self.function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) class A__ : __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__( self :Any , SCREAMING_SNAKE_CASE :int = 1 ) -> None: '''simple docstring''' _a : Any =dim _a : List[Any] ={k: dim * self.args_dim[k] for k in self.args_dim} def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]: '''simple docstring''' if self.dim == 1: return self.distribution_class(*SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , ) -> Distribution: '''simple docstring''' _a : str =self._base_distribution(SCREAMING_SNAKE_CASE ) if loc is None and scale is None: return distr else: return AffineTransformed(SCREAMING_SNAKE_CASE , loc=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , event_dim=self.event_dim ) @property def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' return () if self.dim == 1 else (self.dim,) @property def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return len(self.event_shape ) @property def __UpperCAmelCase ( self :Any ) -> float: '''simple docstring''' return 0.0 def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> 
nn.Module: '''simple docstring''' return ParameterProjection( in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def __UpperCAmelCase ( self :int , *SCREAMING_SNAKE_CASE :torch.Tensor ) -> Any: '''simple docstring''' raise NotImplementedError() @staticmethod def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor: '''simple docstring''' return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0 class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def __UpperCAmelCase ( cls :int , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Union[str, Any]: '''simple docstring''' _a : Tuple =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) _a : Optional[Any] =2.0 + cls.squareplus(SCREAMING_SNAKE_CASE ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Dict: '''simple docstring''' _a : List[str] =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Optional[int]: '''simple docstring''' _a : int =cls.squareplus(SCREAMING_SNAKE_CASE ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Distribution: '''simple docstring''' _a , _a : Any =distr_args if self.dim == 1: return self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None ) -> Distribution: '''simple docstring''' _a , _a : Optional[int] =distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
694
1
"""Naive (brute-force) pattern search."""


def naive_pattern_search(s: str, pattern: str) -> list:
    """Return the start index of every occurrence of ``pattern`` in ``s``."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
694
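The naive scan above tries every alignment of the pattern, so the worst case is O(len(s) * len(pattern)); overlapping matches are still reported:

assert naive_pattern_search("AABAACAADAABAABA", "AABA") == [0, 9, 12]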
"""Single-bit manipulation helpers."""


def set_bit(number: int, position: int) -> int:
    """Set the bit at ``position`` of ``number`` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at ``position`` of ``number`` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Invert the bit at ``position`` of ``number``."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at ``position`` of ``number`` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return 1 if the bit at ``position`` of ``number`` is set, else 0."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
694
1
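The bit-manipulation record above implements set, clear, flip, and test as one-line mask expressions. A quick demonstration of the same expressions on a concrete value (an illustrative sketch, not part of the record):

number = 0b1010  # decimal 10

assert (number | (1 << 0)) == 0b1011   # set bit 0
assert (number & ~(1 << 1)) == 0b1000  # clear bit 1
assert (number ^ (1 << 2)) == 0b1110   # flip bit 2
assert ((number >> 3) & 1) == 1        # bit 3 is set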
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) A__: Any = { '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''], '''tokenization_lxmert''': ['''LxmertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: List[Any] = ['''LxmertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: Tuple = [ '''LxmertEncoder''', '''LxmertForPreTraining''', '''LxmertForQuestionAnswering''', '''LxmertModel''', '''LxmertPreTrainedModel''', '''LxmertVisualFeatureEncoder''', '''LxmertXLayer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: Tuple = [ '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLxmertForPreTraining''', '''TFLxmertMainLayer''', '''TFLxmertModel''', '''TFLxmertPreTrainedModel''', '''TFLxmertVisualFeatureEncoder''', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys A__: Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list ,_UpperCAmelCase : list ) -> float: _validate_point(_UpperCAmelCase ) _validate_point(_UpperCAmelCase ) if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): raise ValueError("""Both points must be in the same n-dimensional space""" ) return float(sum(abs(a - b ) for a, b in zip(_UpperCAmelCase ,_UpperCAmelCase ) ) ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[float] ) -> None: if point: if isinstance(_UpperCAmelCase ,_UpperCAmelCase ): for item in point: if not isinstance(_UpperCAmelCase ,(int, float) ): _a : str =( """Expected a list of numbers as input, found """ F"{type(_UpperCAmelCase ).__name__}" ) raise TypeError(_UpperCAmelCase ) else: _a : List[Any] =F"Expected a list of numbers as input, found {type(_UpperCAmelCase ).__name__}" raise TypeError(_UpperCAmelCase ) else: raise ValueError("""Missing an input""" ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list ,_UpperCAmelCase : list ) -> float: _validate_point(_UpperCAmelCase ) _validate_point(_UpperCAmelCase ) if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): raise ValueError("""Both points must be in the same n-dimensional space""" ) return float(sum(abs(x - y ) for x, y in zip(_UpperCAmelCase ,_UpperCAmelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
694
1
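The distance record above computes the Manhattan (L1) distance as a sum of coordinate-wise absolute differences, in both a validated and a one-line variant. A minimal sketch of the core computation, assuming plain Python lists (deobfuscated names are illustrative):

def manhattan_distance(point_a: list[float], point_b: list[float]) -> float:
    # L1 distance: sum of |a_i - b_i| over matching coordinates.
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))

assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance([1.5, 2.0], [3.0, 4.0]) == 3.5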
'''simple docstring''' from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 A__: Any = { # 1536-bit 5: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 2048-bit 14: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 3072-bit 15: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 4096-bit 16: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + 
'''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199''' + '''FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 6144-bit 17: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08''' + '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B''' + '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9''' + '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6''' + '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8''' + '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C''' + '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718''' + '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D''' + '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D''' + '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226''' + '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC''' + '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26''' + '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB''' + '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2''' + '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127''' + '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406''' + '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918''' + '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151''' + '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03''' + '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F''' + '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B''' + '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632''' + '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E''' + '''6DCC4024FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 8192-bit 18: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD''' + '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831''' + '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B''' + '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF''' + '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6''' + 
'''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3''' + '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328''' + '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C''' + '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE''' + '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4''' + '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300''' + '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568''' + '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9''' + '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B''' + '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A''' + '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36''' + '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1''' + '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92''' + '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47''' + '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71''' + '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, } class A__ : def __init__( self :int , SCREAMING_SNAKE_CASE :int = 1_4 ) -> None: '''simple docstring''' if group not in primes: raise ValueError("""Unsupported Group""" ) _a : Any =primes[group]["""prime"""] _a : Optional[int] =primes[group]["""generator"""] _a : Any =int(hexlify(urandom(3_2 ) ) , base=1_6 ) def __UpperCAmelCase ( self :str ) -> str: '''simple docstring''' return hex(self.__private_key )[2:] def __UpperCAmelCase ( self :Tuple ) -> str: '''simple docstring''' _a : Any =pow(self.generator , self.__private_key , self.prime ) return hex(SCREAMING_SNAKE_CASE )[2:] def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :int ) -> bool: '''simple docstring''' # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(SCREAMING_SNAKE_CASE , (self.prime - 1) // 2 , self.prime ) == 1 ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> str: '''simple docstring''' _a : Dict =int(SCREAMING_SNAKE_CASE , base=1_6 ) if not self.is_valid_public_key(SCREAMING_SNAKE_CASE ): raise ValueError("""Invalid public key""" ) _a : List[str] =pow(SCREAMING_SNAKE_CASE , self.__private_key , self.prime ) return shaaaa(str(SCREAMING_SNAKE_CASE ).encode() ).hexdigest() @staticmethod def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int ) -> bool: '''simple docstring''' # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(SCREAMING_SNAKE_CASE , (prime - 1) // 2 , SCREAMING_SNAKE_CASE ) == 1 ) @staticmethod def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :int = 1_4 ) -> str: '''simple docstring''' _a : List[str] =int(SCREAMING_SNAKE_CASE , base=1_6 ) _a : Dict =int(SCREAMING_SNAKE_CASE , base=1_6 ) _a : Dict =primes[group]["""prime"""] if not DiffieHellman.is_valid_public_key_static(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): raise ValueError("""Invalid public key""" ) _a : Dict =pow(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return shaaaa(str(SCREAMING_SNAKE_CASE ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
694
'''simple docstring''' from __future__ import annotations class A__ : def __init__( self :str , SCREAMING_SNAKE_CASE :int ) -> None: '''simple docstring''' _a : int =order # a_{0} ... a_{k} _a : Optional[Any] =[1.0] + [0.0] * order # b_{0} ... b_{k} _a : Tuple =[1.0] + [0.0] * order # x[n-1] ... x[n-k] _a : List[Any] =[0.0] * self.order # y[n-1] ... y[n-k] _a : Tuple =[0.0] * self.order def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :list[float] , SCREAMING_SNAKE_CASE :list[float] ) -> None: '''simple docstring''' if len(SCREAMING_SNAKE_CASE ) < self.order: _a : int =[1.0, *a_coeffs] if len(SCREAMING_SNAKE_CASE ) != self.order + 1: _a : int =( f"Expected a_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}" ) raise ValueError(SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) != self.order + 1: _a : Optional[Any] =( f"Expected b_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}" ) raise ValueError(SCREAMING_SNAKE_CASE ) _a : List[str] =a_coeffs _a : Union[str, Any] =b_coeffs def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :float ) -> float: '''simple docstring''' _a : str =0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) _a : Any =(result + self.b_coeffs[0] * sample) / self.a_coeffs[0] _a : str =self.input_history[:-1] _a : Optional[Any] =self.output_history[:-1] _a : Optional[int] =sample _a : Tuple =result return result
694
1
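The Diffie-Hellman class above wraps the RFC 3526 MODP groups; the exchange itself is just two modular exponentiations per party. A toy sketch with deliberately tiny, insecure parameters to show that both sides derive the same shared secret (real use takes the RFC 3526 primes listed in the record):

prime, generator = 23, 5  # toy values for illustration only

alice_private, bob_private = 6, 15
alice_public = pow(generator, alice_private, prime)  # 8
bob_public = pow(generator, bob_private, prime)      # 19

# Each side raises the other's public value to its own private exponent.
assert pow(bob_public, alice_private, prime) == pow(alice_public, bob_private, prime) == 2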
'''simple docstring''' import math def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes greater than 3 are of the form 6k +/- 1 for i in range(5 ,int(math.sqrt(_UpperCAmelCase ) + 1 ) ,6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : float = 0.1 ) -> int: _a : str =3 _a : Any =3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 ,(j + 2) * (j + 2) ,j + 1 ): primes += is_prime(_UpperCAmelCase ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
694
'''simple docstring''' from queue import PriorityQueue from typing import Any import numpy as np def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : dict ,_UpperCAmelCase : str ,_UpperCAmelCase : set ,_UpperCAmelCase : set ,_UpperCAmelCase : dict ,_UpperCAmelCase : dict ,_UpperCAmelCase : PriorityQueue ,_UpperCAmelCase : dict ,_UpperCAmelCase : float | int ,) -> float | int: for nxt, d in graph[v]: if nxt in visited_forward: continue _a : Dict =cst_fwd.get(_UpperCAmelCase ,np.inf ) _a : int =cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) _a : Tuple =new_cost_f _a : Optional[Any] =v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: _a : str =cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ,_UpperCAmelCase : dict ,_UpperCAmelCase : dict ) -> int: _a : Optional[Any] =-1 _a : List[str] =set() _a : Optional[int] =set() _a : Optional[int] ={source: 0} _a : List[str] ={destination: 0} _a : Union[str, Any] ={source: None} _a : Dict ={destination: None} _a : PriorityQueue[Any] =PriorityQueue() _a : PriorityQueue[Any] =PriorityQueue() _a : Optional[int] =np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): _a , _a : str =queue_forward.get() visited_forward.add(_UpperCAmelCase ) _a , _a : List[Any] =queue_backward.get() visited_backward.add(_UpperCAmelCase ) _a : int =pass_and_relaxation( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,) _a : Any =pass_and_relaxation( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: _a : Any =shortest_distance return shortest_path_distance A__: Union[str, Any] = { '''B''': [['''C''', 1]], '''C''': [['''D''', 1]], '''D''': [['''F''', 1]], '''E''': [['''B''', 1], ['''G''', 2]], '''F''': [], '''G''': [['''F''', 1]], } A__: str = { '''B''': [['''E''', 1]], '''C''': [['''B''', 1]], '''D''': [['''C''', 1]], '''F''': [['''D''', 1], ['''G''', 1]], '''E''': [[None, np.inf]], '''G''': [['''E''', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
694
1
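The Project Euler snippet above relies on the fact that every prime greater than 3 has the form 6k +/- 1, so trial division only needs to test 5, 7, 11, 13, ... up to the square root of n. A readable sketch of that primality helper (illustrative names):

import math

def is_prime(number: int) -> bool:
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    # Test only candidates of the form 6k - 1 and 6k + 1.
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]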
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A__: Optional[int] = logging.get_logger(__name__) A__: Union[str, Any] = '''▁''' A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''} A__: Optional[int] = { '''vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''', }, '''monolingual_vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''', }, } A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024} class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = VOCAB_FILES_NAMES __UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it _a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token _a : int ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) _a : Dict =vocab_file _a : int =monolingual_vocab_file _a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _a : List[Any] ={} _a : List[str] =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[Any] =cnt cnt += 1 with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _a : int =line.strip().split()[0] _a : str =len(self.fairseq_tokens_to_ids ) if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[int] =len(self.fairseq_tokens_to_ids ) _a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self :int ) -> List[Any]: '''simple docstring''' _a : Optional[int] =self.__dict__.copy() _a : Optional[Any] =None _a : str =self.sp_model.serialized_model_proto() return state def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str: '''simple docstring''' _a : List[str] =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple ={} _a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def 
__UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _a : Optional[int] =[self.cls_token_id] _a : int =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _a : List[str] =[self.sep_token_id] _a : int =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __UpperCAmelCase ( self :Optional[int] ) -> int: '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def __UpperCAmelCase ( self :int ) -> Union[str, Any]: '''simple docstring''' _a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict: '''simple docstring''' return self.fairseq_ids_to_tokens[index] def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip() return out_string def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _a : int =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _a : Any =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi: _a : Optional[Any] =self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE ) 
elif not os.path.isfile(self.monolingual_vocab_file ): with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" ) return out_vocab_file, out_monolingual_vocab_file
694
'''simple docstring''' from math import factorial def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 100 ) -> int: return sum(map(_UpperCAmelCase ,str(factorial(_UpperCAmelCase ) ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
694
1
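The factorial record above (Project Euler 20) sums the decimal digits of n!; in the obfuscated string, the callable passed to map stands in for int. A direct sketch of the intended computation:

from math import factorial

def digit_sum_of_factorial(n: int) -> int:
    # Sum of the decimal digits of n!.
    return sum(int(d) for d in str(factorial(n)))

assert digit_sum_of_factorial(10) == 27    # 10! = 3628800 -> 3+6+2+8+8
assert digit_sum_of_factorial(100) == 648  # the published Project Euler 20 answer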
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str = " " ) -> list: _a : Any =[] _a : Optional[int] =0 for index, char in enumerate(_UpperCAmelCase ): if char == separator: split_words.append(string[last_index:index] ) _a : Any =index + 1 elif index + 1 == len(_UpperCAmelCase ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ) -> list: _a : Tuple =len(_UpperCAmelCase ) _a : str =[] for i in range(len(_UpperCAmelCase ) - pat_len + 1 ): _a : int =True for j in range(_UpperCAmelCase ): if s[i + j] != pattern[j]: _a : int =False break if match_found: position.append(_UpperCAmelCase ) return position if __name__ == "__main__": assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3] print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
694
1
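The naive pattern search that appears twice in the records above slides the pattern across the text and compares character by character, O(n * m) overall. An equivalent slice-based sketch that reproduces its test cases (illustrative, not the record's verbatim code):

def naive_pattern_search(s: str, pattern: str) -> list[int]:
    # Check every possible alignment of `pattern` against `s`.
    return [
        i
        for i in range(len(s) - len(pattern) + 1)
        if s[i : i + len(pattern)] == pattern
    ]

assert naive_pattern_search("ABCDEFG", "DE") == [3]
assert naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC") == [4, 10, 18]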
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : List[Any] ) -> List[str]: for param, grad_param in zip(model_a.parameters() ,model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad ,grad_param.grad ) is False ), F"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})" else: # Grads should be in sync assert ( torch.allclose(param.grad ,grad_param.grad ) is True ), F"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})" def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Any=True ) -> List[str]: model.train() _a : Tuple =model(_UpperCAmelCase ) _a : Any =F.mse_loss(_UpperCAmelCase ,target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : Union[str, Any]=False ) -> List[str]: set_seed(42 ) _a : List[Any] =RegressionModel() _a : List[Any] =deepcopy(_UpperCAmelCase ) _a : Union[str, Any] =RegressionDataset(length=80 ) _a : Tuple =DataLoader(_UpperCAmelCase ,batch_size=16 ) model.to(accelerator.device ) if sched: _a : Optional[int] =AdamW(params=model.parameters() ,lr=1e-3 ) _a : List[Any] =AdamW(params=ddp_model.parameters() ,lr=1e-3 ) _a : Dict =LambdaLR(_UpperCAmelCase ,lr_lambda=lambda _UpperCAmelCase : epoch**0.6_5 ) _a : str =LambdaLR(_UpperCAmelCase ,lr_lambda=lambda _UpperCAmelCase : epoch**0.6_5 ) # Make a copy of `model` if sched: _a , _a , _a , _a : Dict =accelerator.prepare(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) else: _a , _a : List[Any] =accelerator.prepare(_UpperCAmelCase ,_UpperCAmelCase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ) -> Tuple: # Test when on a single CPU or GPU that the context manager does nothing _a , _a , _a : List[Any] =get_training_setup(_UpperCAmelCase ) # Use a single batch _a , _a : Union[str, Any] =next(iter(_UpperCAmelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a : Tuple =accelerator.gather((ddp_input, ddp_target) ) _a , _a : List[Any] =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_UpperCAmelCase ): step_model(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) else: # Sync grads step_model(_UpperCAmelCase ,_UpperCAmelCase 
,_UpperCAmelCase ,_UpperCAmelCase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad ,ddp_param.grad ), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a : str =ddp_input[torch.randperm(len(_UpperCAmelCase ) )] def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ) -> Dict: # Test on distributed setup that context manager behaves properly _a , _a , _a : List[Any] =get_training_setup(_UpperCAmelCase ) # Use a single batch _a , _a : str =next(iter(_UpperCAmelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a : Tuple =accelerator.gather((ddp_input, ddp_target) ) _a , _a : int =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_UpperCAmelCase ): step_model(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) else: # Sync grads step_model(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad ,ddp_param.grad ) is False ), F"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" else: # Grads should be in sync assert ( torch.allclose(param.grad ,ddp_param.grad ) is True ), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a : Optional[int] =ddp_input[torch.randperm(len(_UpperCAmelCase ) )] def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any]=False ,_UpperCAmelCase : Dict=False ) -> Dict: _a : Tuple =Accelerator( split_batches=_UpperCAmelCase ,dispatch_batches=_UpperCAmelCase ,gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a : str =get_training_setup(_UpperCAmelCase ) for iteration, batch in enumerate(_UpperCAmelCase ): _a , _a : Dict =batch.values() # Gather the distributed inputs and targs for the base model _a , _a : Dict =accelerator.gather((ddp_input, ddp_target) ) _a , _a : Tuple =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(_UpperCAmelCase ): step_model(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(_UpperCAmelCase ) - 1): # Grads should be in sync assert ( 
torch.allclose(param.grad ,ddp_param.grad ) is True ), F"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" else: # Grads should not be in sync assert ( torch.allclose(param.grad ,ddp_param.grad ) is False ), F"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a : Optional[int] =ddp_input[torch.randperm(len(_UpperCAmelCase ) )] GradientState._reset_state() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any]=False ,_UpperCAmelCase : int=False ) -> int: _a : Optional[Any] =Accelerator( split_batches=_UpperCAmelCase ,dispatch_batches=_UpperCAmelCase ,gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a , _a , _a , _a , _a : Optional[int] =get_training_setup(_UpperCAmelCase ,_UpperCAmelCase ) for iteration, batch in enumerate(_UpperCAmelCase ): _a , _a : Tuple =batch.values() # Gather the distributed inputs and targs for the base model _a , _a : str =accelerator.gather((ddp_input, ddp_target) ) _a , _a : int =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_UpperCAmelCase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(_UpperCAmelCase ): step_model(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n" _a : Optional[int] =(((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_UpperCAmelCase )) if accelerator.num_processes > 1: check_model_parameters(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]: _a : Dict =Accelerator() _a : Optional[Any] =RegressionDataset(length=80 ) _a : Any =DataLoader(_UpperCAmelCase ,batch_size=16 ) _a : int =RegressionDataset(length=96 ) _a : str =DataLoader(_UpperCAmelCase ,batch_size=16 ) _a , _a : List[str] =accelerator.prepare(_UpperCAmelCase ,_UpperCAmelCase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(_UpperCAmelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(_UpperCAmelCase ) if iteration < len(_UpperCAmelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(_UpperCAmelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(_UpperCAmelCase ) if batch_num < len(_UpperCAmelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def SCREAMING_SNAKE_CASE_ ( ) -> Dict: _a : Optional[Any] 
=Accelerator() _a : Dict =accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(_UpperCAmelCase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(_UpperCAmelCase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ ,F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" ,) test_gradient_accumulation(_UpperCAmelCase ,_UpperCAmelCase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" ,"""2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ ,"""`split_batches=False`, `dispatch_batches=False`**""" ,) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ ,F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" ,) test_gradient_accumulation_with_opt_and_scheduler(_UpperCAmelCase ,_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> Optional[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
694
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class A__ ( unittest.TestCase ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=1_8 , SCREAMING_SNAKE_CASE :Any=3_0 , SCREAMING_SNAKE_CASE :List[str]=4_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , ) -> Tuple: '''simple docstring''' _a : int =size if size is not None else {"""height""": 1_8, """width""": 1_8} _a : int =parent _a : Optional[int] =batch_size _a : List[str] =num_channels _a : Optional[Any] =image_size _a : int =min_resolution _a : str =max_resolution _a : str =do_resize _a : Tuple =size _a : Tuple =do_normalize def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804], [-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self :List[Any] ) -> List[Any]: '''simple docstring''' _a : Any =ImageGPTImageProcessingTester(self ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self :Dict ) -> Any: '''simple docstring''' _a : Optional[Any] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) ) def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]: '''simple docstring''' _a : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} ) _a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} ) def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) _a : Dict =json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) ) else: self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' _a : List[Any] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : Any 
=os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" ) image_processor_first.to_json_file(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict() _a : Tuple =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> str: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict() _a : Union[str, Any] =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: _a : Any =load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" ) _a : Dict =Image.open(dataset[4]["""file"""] ) _a : Optional[int] =Image.open(dataset[5]["""file"""] ) _a : Optional[Any] =[imagea, imagea] return images @require_vision @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : Optional[Any] =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) _a : int =prepare_images() # test non-batched _a : Dict =image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) ) _a : Optional[int] =[3_0_6, 1_9_1, 1_9_1] self.assertEqual(encoding.input_ids[0, :3].tolist() , SCREAMING_SNAKE_CASE ) # test batched _a : Dict =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) ) _a : Any =[3_0_3, 1_3, 1_3] self.assertEqual(encoding.input_ids[1, -3:].tolist() , SCREAMING_SNAKE_CASE )
694
1
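The accelerate test record above verifies that gradients synchronize only on accumulation boundaries and that accumulated micro-batch steps match full-batch steps. The underlying identity is easy to show in plain PyTorch: scaling each of k micro-batch losses by 1/k and accumulating the backward passes reproduces the full-batch gradient. A sketch, assuming torch is installed (model and shapes here are arbitrary):

import torch

model = torch.nn.Linear(4, 1)
data, target = torch.randn(8, 4), torch.randn(8, 1)

# Full-batch gradient for reference.
model.zero_grad()
torch.nn.functional.mse_loss(model(data), target).backward()
full_grad = model.weight.grad.clone()

# Two micro-batches of 4, each loss scaled by 1/2, gradients accumulated.
model.zero_grad()
for xs, ys in ((data[:4], target[:4]), (data[4:], target[4:])):
    (torch.nn.functional.mse_loss(model(xs), ys) / 2).backward()

assert torch.allclose(model.weight.grad, full_grad, atol=1e-6)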
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) A__: Tuple = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: List[str] = ['''FNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: Tuple = ['''FNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: Any = [ '''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FNetForMaskedLM''', '''FNetForMultipleChoice''', '''FNetForNextSentencePrediction''', '''FNetForPreTraining''', '''FNetForQuestionAnswering''', '''FNetForSequenceClassification''', '''FNetForTokenClassification''', '''FNetLayer''', '''FNetModel''', '''FNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys A__: Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[int] ,_UpperCAmelCase : int ) -> bool: _a : Optional[int] =len(_UpperCAmelCase ) _a : Tuple =[[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): _a : Any =True # sum is not zero and set is empty then false for i in range(1 ,required_sum + 1 ): _a : int =False for i in range(1 ,arr_len + 1 ): for j in range(1 ,required_sum + 1 ): if arr[i - 1] > j: _a : Optional[Any] =subset[i - 1][j] if arr[i - 1] <= j: _a : Union[str, Any] =subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
694
1
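The subset-sum record above fills the classic boolean DP table. A deobfuscated sketch of the same recurrence, where subset[i][j] is True when some subset of the first i values sums to j:

def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    subset = [[False] * (required_sum + 1) for _ in range(len(arr) + 1)]
    for i in range(len(arr) + 1):
        subset[i][0] = True  # the empty subset always sums to zero
    for i in range(1, len(arr) + 1):
        for j in range(1, required_sum + 1):
            # Either skip arr[i-1], or (if it fits) take it.
            subset[i][j] = subset[i - 1][j] or (
                arr[i - 1] <= j and subset[i - 1][j - arr[i - 1]]
            )
    return subset[len(arr)][required_sum]

assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True   # 4 + 5
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False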
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[Any] ) -> str: _a : Union[str, Any] =0 if start < end: _a : List[str] =randint(_UpperCAmelCase ,_UpperCAmelCase ) _a : str =a[end] _a : int =a[pivot] _a : Dict =temp _a , _a : str =_in_place_partition(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) count += _in_place_quick_sort(_UpperCAmelCase ,_UpperCAmelCase ,p - 1 ) count += _in_place_quick_sort(_UpperCAmelCase ,p + 1 ,_UpperCAmelCase ) return count def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ) -> str: _a : List[str] =0 _a : str =randint(_UpperCAmelCase ,_UpperCAmelCase ) _a : Any =a[end] _a : List[str] =a[pivot] _a : Optional[Any] =temp _a : Optional[int] =start - 1 for index in range(_UpperCAmelCase ,_UpperCAmelCase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _a : Optional[int] =new_pivot_index + 1 _a : Tuple =a[new_pivot_index] _a : Tuple =a[index] _a : Optional[int] =temp _a : Union[str, Any] =a[new_pivot_index + 1] _a : str =a[end] _a : Dict =temp return new_pivot_index + 1, count A__: List[Any] = TemporaryFile() A__: List[Any] = 100 # 100 elements are to be sorted A__ , A__: str = 0, 1 # mean and standard deviation A__: str = np.random.normal(mu, sigma, p) np.save(outfile, X) print('''The array is''') print(X) outfile.seek(0) # using the same array A__: Dict = np.load(outfile) A__: Dict = len(M) - 1 A__: Union[str, Any] = _in_place_quick_sort(M, 0, r) print( '''No of Comparisons for 100 elements selected from a standard normal distribution ''' '''is :''' ) print(z)
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 4000000 ) -> int: _a : Optional[Any] =[] _a , _a : Union[str, Any] =0, 1 while b <= n: if b % 2 == 0: even_fibs.append(_UpperCAmelCase ) _a , _a : Optional[Any] =b, a + b return sum(_UpperCAmelCase ) if __name__ == "__main__": print(F"{solution() = }")
694
1
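Project Euler 2, in the record above, accumulates even Fibonacci values while stepping the (a, b) pair. A compact sketch of the same loop:

def sum_even_fibs(limit: int) -> int:
    total, a, b = 0, 0, 1
    while b <= limit:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total

assert sum_even_fibs(10) == 10                # 2 + 8
assert sum_even_fibs(4_000_000) == 4_613_732  # published Project Euler 2 answer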
'''simple docstring''' A__: Optional[int] = 256 # Modulus to hash a string A__: Optional[int] = 100_0003 def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ) -> bool: _a : List[str] =len(_UpperCAmelCase ) _a : int =len(_UpperCAmelCase ) if p_len > t_len: return False _a : Any =0 _a : Any =0 _a : str =1 # Calculating the hash of pattern and substring of text for i in range(_UpperCAmelCase ): _a : int =(ord(pattern[i] ) + p_hash * alphabet_size) % modulus _a : Tuple =(ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue _a : Optional[Any] =(modulus_power * alphabet_size) % modulus for i in range(0 ,t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash _a : Union[str, Any] =( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def SCREAMING_SNAKE_CASE_ ( ) -> None: _a : str ="""abc1abc12""" _a : Optional[Any] ="""alskfjaldsabc1abc1abc12k23adsfabcabc""" _a : Optional[int] ="""alskfjaldsk23adsfabcabc""" assert rabin_karp(_UpperCAmelCase ,_UpperCAmelCase ) and not rabin_karp(_UpperCAmelCase ,_UpperCAmelCase ) # Test 2) _a : Any ="""ABABX""" _a : int ="""ABABZABABYABABX""" assert rabin_karp(_UpperCAmelCase ,_UpperCAmelCase ) # Test 3) _a : Optional[int] ="""AAAB""" _a : int ="""ABAAAAAB""" assert rabin_karp(_UpperCAmelCase ,_UpperCAmelCase ) # Test 4) _a : int ="""abcdabcy""" _a : Union[str, Any] ="""abcxabcdabxabcdabcdabcy""" assert rabin_karp(_UpperCAmelCase ,_UpperCAmelCase ) # Test 5) _a : List[str] ="""Lü""" _a : List[str] ="""Lüsai""" assert rabin_karp(_UpperCAmelCase ,_UpperCAmelCase ) _a : Union[str, Any] ="""Lue""" assert not rabin_karp(_UpperCAmelCase ,_UpperCAmelCase ) print("""Success.""" ) if __name__ == "__main__": test_rabin_karp()
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int: if n == 1 or not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): return 0 elif n == 2: return 1 else: _a : Dict =[0, 1] for i in range(2 ,n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int: _a : Union[str, Any] =0 _a : Optional[Any] =2 while digits < n: index += 1 _a : Optional[int] =len(str(fibonacci(_UpperCAmelCase ) ) ) return index def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 1000 ) -> int: return fibonacci_digits_index(_UpperCAmelCase ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
694
1
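The Rabin-Karp record above owes its average O(n + m) cost to the rolling hash: the hash of the next window is derived from the previous one in O(1) by removing the leading character's contribution, shifting, and appending the next character. A sketch of that update with the same base and modulus as the record (helper names are illustrative):

alphabet_size, modulus = 256, 1_000_003

def window_hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h

text, window = "abcd", 3
power = pow(alphabet_size, window - 1, modulus)
h = window_hash(text[:window])  # hash of "abc"
# Drop text[0], shift by the base, append text[3]; Python's % keeps it non-negative.
h = ((h - ord(text[0]) * power) * alphabet_size + ord(text[window])) % modulus
assert h == window_hash(text[1 : window + 1])  # hash of "bcd"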
'''simple docstring''' import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets A__: Tuple = '''\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } ''' A__: Optional[int] = '''\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve ''' A__: int = ''' Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: "c" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. 
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric(\'mauve\') >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCAmelCase ( self :Any ) -> Any: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[ """https://arxiv.org/abs/2102.01454""", """https://github.com/krishnap25/mauve""", ] , ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=None , SCREAMING_SNAKE_CASE :List[Any]="auto" , SCREAMING_SNAKE_CASE :int=-1 , SCREAMING_SNAKE_CASE :Optional[Any]=0.9 , SCREAMING_SNAKE_CASE :List[Any]=5 , SCREAMING_SNAKE_CASE :Optional[int]=5_0_0 , SCREAMING_SNAKE_CASE :Optional[int]="gpt2-large" , SCREAMING_SNAKE_CASE :int=-1 , SCREAMING_SNAKE_CASE :str=1_0_2_4 , SCREAMING_SNAKE_CASE :Any=2_5 , SCREAMING_SNAKE_CASE :Any=5 , SCREAMING_SNAKE_CASE :List[Any]=True , SCREAMING_SNAKE_CASE :List[Any]=2_5 , ) -> Optional[Any]: '''simple docstring''' _a : Dict =compute_mauve( p_text=SCREAMING_SNAKE_CASE , q_text=SCREAMING_SNAKE_CASE , p_features=SCREAMING_SNAKE_CASE , q_features=SCREAMING_SNAKE_CASE , p_tokens=SCREAMING_SNAKE_CASE , q_tokens=SCREAMING_SNAKE_CASE , num_buckets=SCREAMING_SNAKE_CASE , pca_max_data=SCREAMING_SNAKE_CASE , kmeans_explained_var=SCREAMING_SNAKE_CASE , kmeans_num_redo=SCREAMING_SNAKE_CASE , kmeans_max_iter=SCREAMING_SNAKE_CASE , featurize_model_name=SCREAMING_SNAKE_CASE , device_id=SCREAMING_SNAKE_CASE , max_text_length=SCREAMING_SNAKE_CASE , divergence_curve_discretization_size=SCREAMING_SNAKE_CASE , mauve_scaling_factor=SCREAMING_SNAKE_CASE , verbose=SCREAMING_SNAKE_CASE , seed=SCREAMING_SNAKE_CASE , ) return out
694
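For reference, a runnable sketch of calling the metric above, lifted directly from its own docstring example. It assumes the `mauve-text` package and its faiss/sklearn dependencies are installed; featurization downloads gpt2-large on first use:

import datasets

mauve = datasets.load_metric("mauve")
predictions = ["hello there", "general kenobi"]
references = ["hello there", "general kenobi"]
out = mauve.compute(predictions=predictions, references=references)
print(out.mauve)               # scalar in (0, 1]; higher means P and Q are closer
print(out.frontier_integral)   # companion statistic; lower means closer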
'''simple docstring''' import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : int ) -> str: # Initialise PyTorch model _a : List[str] =RemBertConfig.from_json_file(_UpperCAmelCase ) print("""Building PyTorch model from configuration: {}""".format(str(_UpperCAmelCase ) ) ) _a : Dict =RemBertModel(_UpperCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_rembert(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) # Save pytorch-model print("""Save PyTorch model to {}""".format(_UpperCAmelCase ) ) torch.save(model.state_dict() ,_UpperCAmelCase ) if __name__ == "__main__": A__: Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--rembert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained RemBERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) A__: Tuple = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
694
1
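The conversion script above follows the usual three-step TF-to-PyTorch recipe: build a config, instantiate a randomly initialized model, copy the TF weights into it, then save the plain state dict. A sketch with readable names; the argument order of load_tf_weights_in_rembert is assumed here to match the BERT-family converters (model, config, checkpoint path):

import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert

def convert(tf_checkpoint_path: str, config_file: str, dump_path: str) -> None:
    config = RemBertConfig.from_json_file(config_file)     # architecture only
    model = RemBertModel(config)                           # random init
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    torch.save(model.state_dict(), dump_path)              # plain state dict, no config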
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness A__: str = '''\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } ''' A__: Dict = '''\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). ''' A__: Any = ''' Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric("code_eval") >>> test_cases = ["assert add(2,3)==5"] >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {\'pass@1\': 0.5, \'pass@2\': 1.0} ''' A__: Optional[int] = ''' ################################################################################ !!!WARNING!!! ################################################################################ The "code_eval" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). 
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this with: >>> import os >>> os.environ["HF_ALLOW_CODE_EVAL"] = "1" ################################################################################\ ''' A__: Dict = '''The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCAmelCase ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Dict=[1, 1_0, 1_0_0] , SCREAMING_SNAKE_CASE :Tuple=4 , SCREAMING_SNAKE_CASE :int=3.0 ) -> List[Any]: '''simple docstring''' if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""" ) with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE ) as executor: _a : Optional[Any] =[] _a : List[Any] =Counter() _a : Dict =0 _a : Optional[int] =defaultdict(SCREAMING_SNAKE_CASE ) for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ): for candidate in candidates: _a : Optional[int] =candidate + """\n""" + test_case _a : Optional[int] =(test_program, timeout, task_id, completion_id[task_id]) _a : Optional[Any] =executor.submit(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) futures.append(SCREAMING_SNAKE_CASE ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(SCREAMING_SNAKE_CASE ): _a : Union[str, Any] =future.result() results[result["task_id"]].append((result["""completion_id"""], result) ) _a , _a : Any =[], [] for result in results.values(): result.sort() _a : str =[r[1]["""passed"""] for r in result] total.append(len(SCREAMING_SNAKE_CASE ) ) 
correct.append(sum(SCREAMING_SNAKE_CASE ) ) _a : Union[str, Any] =np.array(SCREAMING_SNAKE_CASE ) _a : str =np.array(SCREAMING_SNAKE_CASE ) _a : Any =k _a : List[str] ={f"pass@{k}": estimate_pass_at_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int] ) -> List[str]: def estimator(_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 ,n + 1 ) ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ): _a : Dict =itertools.repeat(_UpperCAmelCase ,len(_UpperCAmelCase ) ) else: assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) _a : str =iter(_UpperCAmelCase ) return np.array([estimator(int(_UpperCAmelCase ) ,int(_UpperCAmelCase ) ,_UpperCAmelCase ) for n, c in zip(_UpperCAmelCase ,_UpperCAmelCase )] )
694
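The estimator at the end of the metric above is the unbiased pass@k from the Codex paper, pass@k = 1 - C(n-c, k) / C(n, k), evaluated as a running product so the binomial coefficients never overflow. A standalone sketch with the same logic:

import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    """n = samples drawn per task, c = samples that passed, k = evaluation budget."""
    if n - c < k:
        return 1.0  # fewer failures than the budget: a passing sample is guaranteed
    # 1 - prod_{i=n-c+1}^{n} (1 - k/i) equals 1 - C(n-c, k)/C(n, k)
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

# Matches the docstring example: one of two candidates passes the test.
assert abs(pass_at_k(n=2, c=1, k=1) - 0.5) < 1e-12
assert pass_at_k(n=2, c=1, k=2) == 1.0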
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A__: Optional[int] = logging.get_logger(__name__) A__: Union[str, Any] = '''▁''' A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''} A__: Optional[int] = { '''vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''', }, '''monolingual_vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''', }, } A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024} class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = VOCAB_FILES_NAMES __UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it _a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token _a : int ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) _a : Dict =vocab_file _a : int =monolingual_vocab_file _a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _a : List[Any] ={} _a : List[str] =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[Any] =cnt cnt += 1 with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _a : int =line.strip().split()[0] _a : str =len(self.fairseq_tokens_to_ids ) if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[int] =len(self.fairseq_tokens_to_ids ) _a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self :int ) -> List[Any]: '''simple docstring''' _a : Optional[int] =self.__dict__.copy() _a : Optional[Any] =None _a : str =self.sp_model.serialized_model_proto() return state def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str: '''simple docstring''' _a : List[str] =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple ={} _a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def 
__UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _a : Optional[int] =[self.cls_token_id] _a : int =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _a : List[str] =[self.sep_token_id] _a : int =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __UpperCAmelCase ( self :Optional[int] ) -> int: '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def __UpperCAmelCase ( self :int ) -> Union[str, Any]: '''simple docstring''' _a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict: '''simple docstring''' return self.fairseq_ids_to_tokens[index] def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip() return out_string def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _a : int =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _a : Any =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi: _a : Optional[Any] =self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE ) 
elif not os.path.isfile(self.monolingual_vocab_file ): with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" ) return out_vocab_file, out_monolingual_vocab_file
694
1
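The special-token helpers above follow the RoBERTa/BART convention: a single sequence is wrapped as `<s> A </s>`, and a pair as `<s> A </s></s> B </s>`, with token-type ids all zero. A hedged usage sketch, assuming the vinai/bartpho-syllable files are reachable and sentencepiece is installed:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
ids = tok("xin chào")["input_ids"]
print(tok.convert_ids_to_tokens(ids))  # starts with '<s>', ends with '</s>'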
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html A__: Optional[int] = '''platform''' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class A__ : __UpperCamelCase : Optional[int] = PegasusConfig __UpperCamelCase : List[Any] = {} __UpperCamelCase : Union[str, Any] = "gelu" def __init__( self :int , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :int=1_3 , SCREAMING_SNAKE_CASE :Optional[int]=7 , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :int=9_9 , SCREAMING_SNAKE_CASE :Optional[Any]=3_2 , SCREAMING_SNAKE_CASE :Dict=5 , SCREAMING_SNAKE_CASE :Optional[Any]=4 , SCREAMING_SNAKE_CASE :Union[str, Any]=3_7 , SCREAMING_SNAKE_CASE :Any=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=2_0 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Tuple=1 , SCREAMING_SNAKE_CASE :Dict=0 , ) -> Tuple: '''simple docstring''' _a : List[str] =parent _a : Optional[Any] =batch_size _a : str =seq_length _a : str =is_training _a : Any =use_labels _a : Optional[Any] =vocab_size _a : int =hidden_size _a : Any =num_hidden_layers _a : str =num_attention_heads _a : str =intermediate_size _a : Optional[int] =hidden_dropout_prob _a : Optional[int] =attention_probs_dropout_prob _a : List[str] =max_position_embeddings _a : Any =eos_token_id _a : Optional[int] =pad_token_id _a : List[Any] =bos_token_id def __UpperCAmelCase ( self :Optional[Any] ) -> Union[str, Any]: '''simple docstring''' _a : Optional[Any] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _a : str =np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _a : Optional[int] =np.concatenate([input_ids, eos_tensor] , axis=1 ) _a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : List[Any] =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _a : Optional[Any] =prepare_pegasus_inputs_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return config, inputs_dict def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Optional[Any] ) -> List[str]: '''simple docstring''' _a : Union[str, Any] =2_0 _a : Optional[int] =model_class_name(SCREAMING_SNAKE_CASE ) _a : int 
=model.encode(inputs_dict["""input_ids"""] ) _a , _a : str =( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _a : List[Any] =model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Tuple =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) _a : Optional[int] =jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _a : Any =model.decode( decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE , decoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , decoder_position_ids=SCREAMING_SNAKE_CASE , ) _a : Dict =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _a : str =model.decode( decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE , decoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_position_ids=SCREAMING_SNAKE_CASE , ) _a : str =model.decode(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : List[str] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" ) def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Tuple ) -> Tuple: '''simple docstring''' _a : Any =2_0 _a : List[Any] =model_class_name(SCREAMING_SNAKE_CASE ) _a : List[Any] =model.encode(inputs_dict["""input_ids"""] ) _a , _a : Optional[int] =( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _a : Optional[int] =jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _a : Tuple =model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Tuple =jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _a : Optional[int] =model.decode( decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE , decoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , decoder_position_ids=SCREAMING_SNAKE_CASE , ) _a : List[str] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _a : Tuple =model.decode( decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=SCREAMING_SNAKE_CASE , decoder_position_ids=SCREAMING_SNAKE_CASE , ) _a : str =model.decode(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , decoder_attention_mask=SCREAMING_SNAKE_CASE ) _a : Optional[Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : Optional[int]=None ,) -> int: if attention_mask is None: _a : Tuple =np.not_equal(_UpperCAmelCase ,config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _a : List[Any] =np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, 
"decoder_attention_mask": decoder_attention_mask, } @require_flax class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : Union[str, Any] = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __UpperCamelCase : Optional[int] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __UpperCamelCase : Dict = True __UpperCamelCase : Optional[Any] = False __UpperCamelCase : List[str] = False __UpperCamelCase : Optional[int] = False def __UpperCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _a : List[str] =FlaxPegasusModelTester(self ) _a : Union[str, Any] =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Dict ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self :Tuple ) -> List[str]: '''simple docstring''' _a , _a : Tuple =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :str ) -> Optional[Any]: '''simple docstring''' _a , _a : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :List[str] ) -> Any: '''simple docstring''' _a , _a : Dict =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a : Dict =self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Dict =model_class(SCREAMING_SNAKE_CASE ) @jax.jit def encode_jitted(SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :List[str]=None , **SCREAMING_SNAKE_CASE :Any ): return model.encode(input_ids=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) with self.subTest("""JIT Enabled""" ): _a : int =encode_jitted(**SCREAMING_SNAKE_CASE ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _a : Dict =encode_jitted(**SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): self.assertEqual(jitted_output.shape , output.shape ) def __UpperCAmelCase ( self :Dict ) -> int: '''simple docstring''' _a , _a : Dict =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a : int =model_class(SCREAMING_SNAKE_CASE ) _a : Any =model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) _a : Union[str, Any] ={ """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :List[str] ): return model.decode( decoder_input_ids=SCREAMING_SNAKE_CASE , decoder_attention_mask=SCREAMING_SNAKE_CASE , encoder_outputs=SCREAMING_SNAKE_CASE , ) with self.subTest("""JIT Enabled""" ): _a : Any =decode_jitted(**SCREAMING_SNAKE_CASE ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _a : int =decode_jitted(**SCREAMING_SNAKE_CASE ).to_tuple() 
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __UpperCAmelCase ( self :int ) -> Optional[int]: '''simple docstring''' for model_class_name in self.all_model_classes: _a : Union[str, Any] =model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=SCREAMING_SNAKE_CASE ) _a : str =np.ones((1, 1) ) _a : Optional[Any] =model(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @slow def __UpperCAmelCase ( self :List[str] ) -> int: '''simple docstring''' _a : Optional[Any] =FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) _a : Dict =PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) _a : List[Any] =[ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] _a : List[Any] =[ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] _a : Union[str, Any] =tokenizer(SCREAMING_SNAKE_CASE , return_tensors="""np""" , truncation=SCREAMING_SNAKE_CASE , max_length=5_1_2 , padding=SCREAMING_SNAKE_CASE ) _a : List[Any] =model.generate(**SCREAMING_SNAKE_CASE , num_beams=2 ).sequences _a : Optional[Any] =tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE ) assert tgt_text == decoded
694
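The slow integration test above exercises the full summarization path; trimmed to its essentials it is the standard Flax generate flow (the pegasus-xsum weights download on first use):

from transformers import FlaxPegasusForConditionalGeneration, PegasusTokenizer

model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

inputs = tokenizer(
    ["PG&E stated it scheduled the blackouts in response to forecasts for high winds."],
    return_tensors="np", truncation=True, max_length=512, padding=True,
)
sequences = model.generate(**inputs, num_beams=2).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))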
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
694
1
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''') def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict: hf_model.apply_weight_norm() _a : Any =checkpoint["""input_conv.weight_g"""] _a : Union[str, Any] =checkpoint["""input_conv.weight_v"""] _a : Optional[int] =checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): _a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"] _a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"] _a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): _a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"] _a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"] _a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"] _a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"] _a : Dict =checkpoint["""output_conv.1.weight_g"""] _a : str =checkpoint["""output_conv.1.weight_v"""] _a : Union[str, Any] =checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]: if config_path is not None: _a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase ) else: _a : str =SpeechTaHifiGanConfig() _a : Tuple =SpeechTaHifiGan(_UpperCAmelCase ) _a : int =torch.load(_UpperCAmelCase ) load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase ) _a : Dict =np.load(_UpperCAmelCase ) _a : Union[str, Any] =stats[0].reshape(-1 ) _a : Any =stats[1].reshape(-1 ) _a : Tuple =torch.from_numpy(_UpperCAmelCase ).float() _a : List[str] =torch.from_numpy(_UpperCAmelCase ).float() model.save_pretrained(_UpperCAmelCase ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(_UpperCAmelCase ) if __name__ == "__main__": A__: Optional[int] = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) A__: Tuple = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
694
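The converter brackets the key copying with apply_weight_norm()/remove_weight_norm() because the original checkpoint stores each convolution as a weight_g/weight_v pair, i.e. weight normalisation with w = g * v / ||v||. A small sketch of that mechanic in plain PyTorch (this utility is deprecated in recent releases in favour of parametrizations, but still available):

import torch
from torch.nn.utils import weight_norm, remove_weight_norm

conv = weight_norm(torch.nn.Conv1d(4, 4, 3))  # splits .weight into weight_g / weight_v
assert hasattr(conv, "weight_g") and hasattr(conv, "weight_v")

remove_weight_norm(conv)  # folds g and v back into a single materialized .weight
assert not hasattr(conv, "weight_g")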
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list[list]: _a : Dict =current_set.copy() for row_index, row in enumerate(_UpperCAmelCase ): _a : Any =row[0] for column_index, column in enumerate(_UpperCAmelCase ): if magnitude == 0: _a : Any =column continue _a : Union[str, Any] =column / magnitude # Subtract to cancel term _a : Optional[Any] =current_set[0] _a : List[Any] =[first_row] _a : Tuple =current_set[1::] for row in current_set: _a : Any =[] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(_UpperCAmelCase ) continue for column_index in range(len(_UpperCAmelCase ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(_UpperCAmelCase ) # Create next recursion iteration set if len(final_set[0] ) != 3: _a : List[str] =final_set[0] _a : Tuple =[] _a : Tuple =[] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _a : str =simplify(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): resultant[i].insert(0 ,current_first_column[i] ) resultant.insert(0 ,_UpperCAmelCase ) _a : List[Any] =resultant return final_set def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list: if len(_UpperCAmelCase ) == 0: raise IndexError("""solve_simultaneous() requires n lists of length n+1""" ) _a : str =len(_UpperCAmelCase ) + 1 if any(len(_UpperCAmelCase ) != _length for item in equations ): raise IndexError("""solve_simultaneous() requires n lists of length n+1""" ) for row in equations: if any(not isinstance(_UpperCAmelCase ,(int, float) ) for column in row ): raise ValueError("""solve_simultaneous() requires lists of integers""" ) if len(_UpperCAmelCase ) == 1: return [equations[0][-1] / equations[0][0]] _a : str =equations.copy() if any(0 in row for row in data_set ): _a : Optional[int] =data_set.copy() _a : str =[] for row_index, row in enumerate(_UpperCAmelCase ): if 0 not in row: _a : List[Any] =data_set.pop(_UpperCAmelCase ) break if not full_row: raise ValueError("""solve_simultaneous() requires at least 1 full equation""" ) data_set.insert(0 ,_UpperCAmelCase ) _a : Dict =data_set.copy() _a : Any =simplify(_UpperCAmelCase ) _a : Any =simplified[::-1] _a : list =[] for row in simplified: _a : Optional[Any] =row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _a : Any =row.copy()[: len(_UpperCAmelCase ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(_UpperCAmelCase ) == 0: solutions.append(0 ) continue _a : List[str] =temp_row[1::] _a : int =temp_row[::-1] for column_index, column in enumerate(_UpperCAmelCase ): current_solution -= column * solutions[column_index] solutions.append(_UpperCAmelCase ) _a : Tuple =[] for item in solutions: final.append(float(round(_UpperCAmelCase ,5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() A__: int = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
694
1
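A quick cross-check of the elimination routine against numpy's solver, on the same augmented system (coefficients | constants) used in the module's __main__ block:

import numpy as np

eq = [
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]
A = np.array([row[:-1] for row in eq], dtype=float)
b = np.array([row[-1] for row in eq], dtype=float)
print(np.linalg.solve(A, b))  # should agree with solve_simultaneous(eq)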
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow A__: str = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ '''text-classification''', '''language-modeling''', '''summarization''', '''token-classification''', '''question-answering''', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) A__: Dict = logging.getLogger() def SCREAMING_SNAKE_CASE_ ( ) -> Tuple: _a : Dict =argparse.ArgumentParser() parser.add_argument("""-f""" ) _a : Union[str, Any] =parser.parse_args() return args.f def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple="eval" ) -> Tuple: _a : int =os.path.join(_UpperCAmelCase ,F"{split}_results.json" ) if os.path.exists(_UpperCAmelCase ): with open(_UpperCAmelCase ,"""r""" ) as f: return json.load(_UpperCAmelCase ) raise ValueError(F"can't find {path}" ) A__: Any = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A__ ( UpperCAmelCase__ ): def __UpperCAmelCase ( self :str ) -> List[Any]: '''simple docstring''' _a : Optional[int] =self.get_auto_remove_tmp_dir() _a : List[Any] =f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split() with patch.object(SCREAMING_SNAKE_CASE , """argv""" , SCREAMING_SNAKE_CASE ): run_flax_glue.main() _a : int =get_results(SCREAMING_SNAKE_CASE ) self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 ) @slow def __UpperCAmelCase ( self :Tuple ) -> Dict: '''simple docstring''' _a : Optional[int] =self.get_auto_remove_tmp_dir() _a : List[Any] =f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(SCREAMING_SNAKE_CASE , """argv""" , SCREAMING_SNAKE_CASE ): run_clm_flax.main() _a : List[Any] =get_results(SCREAMING_SNAKE_CASE ) self.assertLess(result["""eval_perplexity"""] , 1_0_0 ) @slow def __UpperCAmelCase ( self :Tuple ) -> Optional[Any]: '''simple docstring''' _a : Any =self.get_auto_remove_tmp_dir() _a : List[str] =f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split() with patch.object(SCREAMING_SNAKE_CASE , """argv""" , SCREAMING_SNAKE_CASE ): run_summarization_flax.main() _a : Any =get_results(SCREAMING_SNAKE_CASE , split="""test""" ) 
self.assertGreaterEqual(result["""test_rouge1"""] , 1_0 ) self.assertGreaterEqual(result["""test_rouge2"""] , 2 ) self.assertGreaterEqual(result["""test_rougeL"""] , 7 ) self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 ) @slow def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' _a : List[Any] =self.get_auto_remove_tmp_dir() _a : Dict =f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split() with patch.object(SCREAMING_SNAKE_CASE , """argv""" , SCREAMING_SNAKE_CASE ): run_mlm_flax.main() _a : List[Any] =get_results(SCREAMING_SNAKE_CASE ) self.assertLess(result["""eval_perplexity"""] , 4_2 ) @slow def __UpperCAmelCase ( self :Any ) -> Union[str, Any]: '''simple docstring''' _a : int =self.get_auto_remove_tmp_dir() _a : Optional[Any] =f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(SCREAMING_SNAKE_CASE , """argv""" , SCREAMING_SNAKE_CASE ): run_ta_mlm_flax.main() _a : Optional[int] =get_results(SCREAMING_SNAKE_CASE ) self.assertGreaterEqual(result["""eval_accuracy"""] , 0.42 ) @slow def __UpperCAmelCase ( self :List[Any] ) -> List[str]: '''simple docstring''' # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu _a : List[Any] =7 if get_gpu_count() > 1 else 2 _a : Optional[int] =self.get_auto_remove_tmp_dir() _a : Dict =f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split() with patch.object(SCREAMING_SNAKE_CASE , """argv""" , SCREAMING_SNAKE_CASE ): run_flax_ner.main() _a : int =get_results(SCREAMING_SNAKE_CASE ) self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 ) self.assertGreaterEqual(result["""eval_f1"""] , 0.3 ) @slow def __UpperCAmelCase ( self :Dict ) -> str: '''simple docstring''' _a : str =self.get_auto_remove_tmp_dir() _a : Optional[Any] =f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split() with patch.object(SCREAMING_SNAKE_CASE , """argv""" , SCREAMING_SNAKE_CASE ): run_qa.main() _a : Optional[Any] =get_results(SCREAMING_SNAKE_CASE ) self.assertGreaterEqual(result["""eval_f1"""] , 3_0 ) 
self.assertGreaterEqual(result["""eval_exact"""] , 3_0 )
694
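All of the example tests above drive a script by patching sys.argv and then calling its main() directly, so argparse inside the script sees the synthetic command line. The pattern in isolation:

import argparse
import sys
from unittest.mock import patch

def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_dir")
    args = parser.parse_args()  # reads sys.argv[1:]
    print(args.output_dir)

# argv[0] is the (ignored) program name, the rest are the flags under test
testargs = "run_script.py --output_dir /tmp/out".split()
with patch.object(sys, "argv", testargs):
    main()  # prints /tmp/out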
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__: Dict = logging.get_logger(__name__) A__: Optional[int] = { '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = "markuplm" def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any: '''simple docstring''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) _a : Any =vocab_size _a : List[str] =hidden_size _a : List[str] =num_hidden_layers _a : Tuple =num_attention_heads _a : Union[str, Any] =hidden_act _a : Tuple =intermediate_size _a : Optional[Any] =hidden_dropout_prob _a : int =attention_probs_dropout_prob _a : Any =max_position_embeddings _a : List[Any] =type_vocab_size _a : List[Any] =initializer_range _a : List[Any] =layer_norm_eps _a : Optional[int] =position_embedding_type _a : List[Any] =use_cache _a : List[str] =classifier_dropout # additional properties _a : int =max_depth _a : Union[str, Any] =max_xpath_tag_unit_embeddings _a : str =max_xpath_subs_unit_embeddings _a : int =tag_pad_id _a : List[Any] =subs_pad_id _a : str =xpath_unit_hidden_size
694
1
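Beyond the BERT-style fields, the config above adds XPath-specific sizes: the maximum tag depth, the tag and subscript embedding vocabularies, and the per-unit hidden size. A sketch of overriding them; the values shown are read off the defaults in the signature above:

from transformers import MarkupLMConfig

config = MarkupLMConfig(
    max_depth=50,                         # XPath units kept per node path
    max_xpath_tag_unit_embeddings=256,    # distinct HTML tag ids
    max_xpath_subs_unit_embeddings=1024,  # distinct subscript ids
    xpath_unit_hidden_size=32,
)
print(config.max_depth, config.tag_pad_id, config.subs_pad_id)  # 50 216 1001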
'''simple docstring''' from __future__ import annotations class A__ : def __init__( self :str , SCREAMING_SNAKE_CASE :int ) -> None: '''simple docstring''' _a : int =order # a_{0} ... a_{k} _a : Optional[Any] =[1.0] + [0.0] * order # b_{0} ... b_{k} _a : Tuple =[1.0] + [0.0] * order # x[n-1] ... x[n-k] _a : List[Any] =[0.0] * self.order # y[n-1] ... y[n-k] _a : Tuple =[0.0] * self.order def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :list[float] , SCREAMING_SNAKE_CASE :list[float] ) -> None: '''simple docstring''' if len(SCREAMING_SNAKE_CASE ) < self.order: _a : int =[1.0, *a_coeffs] if len(SCREAMING_SNAKE_CASE ) != self.order + 1: _a : int =( f"Expected a_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}" ) raise ValueError(SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) != self.order + 1: _a : Optional[Any] =( f"Expected b_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}" ) raise ValueError(SCREAMING_SNAKE_CASE ) _a : List[str] =a_coeffs _a : Union[str, Any] =b_coeffs def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :float ) -> float: '''simple docstring''' _a : str =0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) _a : Any =(result + self.b_coeffs[0] * sample) / self.a_coeffs[0] _a : str =self.input_history[:-1] _a : Optional[Any] =self.output_history[:-1] _a : Optional[int] =sample _a : Tuple =result return result
694
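The class above is a direct-form I IIR filter implementing a0*y[n] = b0*x[n] + sum_i (b_i*x[n-i] - a_i*y[n-i]) for i = 1..order. A self-contained sketch of the same recurrence with plain lists (the helper name is illustrative, not from the original), applied to a two-tap moving average written in IIR form:

def iir_process(b, a, samples):
    order = len(a) - 1            # assumes len(a) == len(b) == order + 1
    x_hist = [0.0] * order        # x[n-1] ... x[n-order]
    y_hist = [0.0] * order        # y[n-1] ... y[n-order]
    out = []
    for s in samples:
        acc = sum(b[i] * x_hist[i - 1] - a[i] * y_hist[i - 1]
                  for i in range(1, order + 1))
        y = (acc + b[0] * s) / a[0]
        x_hist = [s] + x_hist[:-1]   # shift histories after computing the output
        y_hist = [y] + y_hist[:-1]
        out.append(y)
    return out

# y[n] = 0.5*x[n] + 0.5*x[n-1] applied to an impulse
print(iir_process(b=[0.5, 0.5], a=[1.0, 0.0], samples=[1.0, 0.0, 0.0]))  # [0.5, 0.5, 0.0]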
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''') def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict: hf_model.apply_weight_norm() _a : Any =checkpoint["""input_conv.weight_g"""] _a : Union[str, Any] =checkpoint["""input_conv.weight_v"""] _a : Optional[int] =checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): _a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"] _a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"] _a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): _a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"] _a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"] _a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"] _a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"] _a : Dict =checkpoint["""output_conv.1.weight_g"""] _a : str =checkpoint["""output_conv.1.weight_v"""] _a : Union[str, Any] =checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]: if config_path is not None: _a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase ) else: _a : str =SpeechTaHifiGanConfig() _a : Tuple =SpeechTaHifiGan(_UpperCAmelCase ) _a : int =torch.load(_UpperCAmelCase ) load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase ) _a : Dict =np.load(_UpperCAmelCase ) _a : Union[str, Any] =stats[0].reshape(-1 ) _a : Any =stats[1].reshape(-1 ) _a : Tuple =torch.from_numpy(_UpperCAmelCase ).float() _a : List[str] =torch.from_numpy(_UpperCAmelCase ).float() model.save_pretrained(_UpperCAmelCase ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(_UpperCAmelCase ) if __name__ == "__main__": A__: Optional[int] = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) A__: Tuple = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
694
1
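The conversion script above copies weight_g/weight_v tensors into the model while weight norm is applied, then strips it. A hedged sketch of that apply/copy/remove pattern on a bare Conv1d follows; the layer shape and checkpoint keys here are invented for illustration, not the script's.

import torch
from torch import nn
from torch.nn.utils import weight_norm, remove_weight_norm

conv = weight_norm(nn.Conv1d(80, 512, kernel_size=7))  # now exposes weight_g / weight_v

# Hypothetical checkpoint entries with the same parametrization.
checkpoint = {
    "conv.weight_g": torch.randn_like(conv.weight_g),
    "conv.weight_v": torch.randn_like(conv.weight_v),
    "conv.bias": torch.randn_like(conv.bias),
}
with torch.no_grad():
    conv.weight_g.copy_(checkpoint["conv.weight_g"])
    conv.weight_v.copy_(checkpoint["conv.weight_v"])
    conv.bias.copy_(checkpoint["conv.bias"])

remove_weight_norm(conv)  # folds g and v back into a single .weight tensor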
'''simple docstring''' import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Optional[int] = (KDPMaDiscreteScheduler,) __UpperCamelCase : int = 10 def __UpperCAmelCase ( self :Any , **SCREAMING_SNAKE_CASE :Any ) -> Dict: '''simple docstring''' _a : int ={ """num_train_timesteps""": 1_1_0_0, """beta_start""": 0.0_001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**SCREAMING_SNAKE_CASE ) return config def __UpperCAmelCase ( self :Dict ) -> Union[str, Any]: '''simple docstring''' for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int ) -> int: '''simple docstring''' for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE , beta_end=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :List[str] ) -> Any: '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :str ) -> Union[str, Any]: '''simple docstring''' _a : Tuple =self.scheduler_classes[0] _a : Any =self.get_scheduler_config(prediction_type="""v_prediction""" ) _a : Union[str, Any] =scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(self.num_inference_steps ) _a : int =self.dummy_model() _a : Optional[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma _a : Any =sample.to(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): _a : Tuple =scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : List[str] =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Tuple =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Dict =output.prev_sample _a : Any =torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) _a : Tuple =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2 assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.693_4286_5017_0972e-07 ) < 1e-2 assert abs(result_mean.item() - 0.0_002 ) < 1e-3 def __UpperCAmelCase ( self :Optional[int] ) -> str: '''simple docstring''' if torch_device == "mps": return _a : Dict =self.scheduler_classes[0] _a : Dict =self.get_scheduler_config() _a : Optional[int] =scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(self.num_inference_steps ) _a : Tuple =self.dummy_model() _a : Optional[int] =self.dummy_sample_deter * scheduler.init_noise_sigma _a : Dict =sample.to(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): _a : Any =scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : int =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : List[str] =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Any =output.prev_sample _a : Optional[int] =torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) _a : Optional[Any] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4_125 ) < 1e-2 assert abs(result_mean.item() - 0.0_266 ) 
< 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4_125 ) < 1e-2 assert abs(result_mean.item() - 0.0_266 ) < 1e-3 def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' if torch_device == "mps": return _a : Dict =self.scheduler_classes[0] _a : List[Any] =self.get_scheduler_config() _a : int =scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(self.num_inference_steps , device=SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =self.dummy_model() _a : List[str] =self.dummy_sample_deter.to(SCREAMING_SNAKE_CASE ) * scheduler.init_noise_sigma for t in scheduler.timesteps: _a : int =scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : List[str] =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : str =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Tuple =output.prev_sample _a : str =torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) _a : Optional[Any] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) if str(SCREAMING_SNAKE_CASE ).startswith("""cpu""" ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4_125 ) < 1e-2 assert abs(result_mean.item() - 0.0_266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4_125 ) < 1e-2 assert abs(result_mean.item() - 0.0_266 ) < 1e-3
694
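Both tests above drive the same denoising loop: scale the sample for the current timestep, run the model, then step the scheduler. A minimal standalone sketch of that loop follows; the entry's KDPMaDiscreteScheduler appears to be diffusers' KDPM2DiscreteScheduler with digits renamed by the dump, and the zero "model" is a stand-in so the snippet runs without a UNet.

import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a real denoising model
    sample = scheduler.step(noise_pred, t, sample).prev_sample
print(sample.abs().mean())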
'''simple docstring''' class A__ : def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]: '''simple docstring''' _a : List[str] =None _a : Optional[Any] =None _a : str =graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Optional[int] =len(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =None def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any: '''simple docstring''' if sources is int: _a : Tuple =[sources] if sinks is int: _a : Optional[int] =[sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return _a : Union[str, Any] =sources[0] _a : Tuple =sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: _a : Tuple =0 for i in sources: max_input_flow += sum(self.graph[i] ) _a : List[Any] =len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _a : Any =max_input_flow _a : List[str] =0 _a : List[str] =len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _a : str =max_input_flow _a : Optional[Any] =size - 1 def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int: '''simple docstring''' _a : Tuple =algorithm(self ) class A__ : def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict: '''simple docstring''' _a : List[str] =flow_network _a : List[Any] =flow_network.verticesCount _a : str =flow_network.sourceIndex _a : str =flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _a : List[Any] =flow_network.graph _a : Optional[int] =False def __UpperCAmelCase ( self :List[Any] ) -> List[str]: '''simple docstring''' if not self.executed: self._algorithm() _a : Any =True def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]: '''simple docstring''' pass class A__ ( UpperCAmelCase__ ): def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result _a : List[Any] =-1 def __UpperCAmelCase ( self :Dict ) -> Tuple: '''simple docstring''' if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class A__ ( UpperCAmelCase__ ): def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) _a : int =[[0] * self.verticies_count for i in range(self.verticies_count )] _a : Union[str, Any] =[0] * self.verticies_count _a : Optional[Any] =[0] * self.verticies_count def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' _a : int =self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += 
bandwidth # Relabel-to-front selection rule _a : Tuple =[ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _a : List[Any] =0 while i < len(SCREAMING_SNAKE_CASE ): _a : Any =vertices_list[i] _a : str =self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) _a : List[str] =0 else: i += 1 _a : Optional[int] =sum(self.preflow[self.source_index] ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' _a : List[str] =min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]: '''simple docstring''' _a : int =None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _a : Optional[Any] =self.heights[to_index] if min_height is not None: _a : Any =min_height + 1 if __name__ == "__main__": A__: str = [0] A__: Optional[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network A__: Union[str, Any] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate A__: List[str] = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
694
1
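The entry above implements push-relabel with the relabel-to-front selection rule. As a cross-check on small graphs, the much shorter Edmonds-Karp (BFS augmenting paths) sketch below computes the same maximum flow on the entry's sample network; note this is a different algorithm, included only for verification.

from collections import deque

def edmonds_karp(capacity: list[list[int]], source: int, sink: int) -> int:
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    max_flow = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph.
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:
            return max_flow  # no augmenting path left
        # Find the bottleneck along the path, then augment.
        bottleneck = float("inf")
        v = sink
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        max_flow += bottleneck

graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
print(edmonds_karp(graph, 0, 3))  # 6, the bottleneck of the single 0->1->2->3 path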
'''simple docstring''' import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class A__ ( UpperCAmelCase__ ): def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Union[str, Any]=7 , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Tuple=False , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Any=3_2 , SCREAMING_SNAKE_CASE :Optional[Any]=5 , SCREAMING_SNAKE_CASE :Union[str, Any]=4 , SCREAMING_SNAKE_CASE :List[str]=6_4 , SCREAMING_SNAKE_CASE :str="gelu" , SCREAMING_SNAKE_CASE :List[str]=0.1 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Dict=0.02 , SCREAMING_SNAKE_CASE :Tuple=3 , SCREAMING_SNAKE_CASE :Union[str, Any]=4 , SCREAMING_SNAKE_CASE :Optional[int]=None , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :str=2 , SCREAMING_SNAKE_CASE :Optional[int]=2 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :List[Any]=1 , ) -> Dict: '''simple docstring''' _a : str =parent _a : List[Any] =batch_size _a : Tuple =seq_length _a : List[str] =is_training _a : Any =use_input_mask _a : int =use_token_type_ids _a : List[Any] =use_labels _a : List[str] =vocab_size _a : List[str] =hidden_size _a : List[str] =num_hidden_layers _a : List[Any] =num_attention_heads _a : Dict =intermediate_size _a : List[str] =hidden_act _a : Union[str, Any] =hidden_dropout_prob _a : Union[str, Any] =attention_probs_dropout_prob _a : Optional[Any] =max_position_embeddings _a : Dict =type_vocab_size _a : int =type_sequence_label_size _a : str =initializer_range _a : Any =num_labels _a : List[str] =num_choices _a : Tuple =scope _a : Dict =q_groups _a : Tuple =k_groups _a : Optional[Any] =v_groups _a : Any =post_attention_groups _a : List[str] =intermediate_groups _a : Dict =output_groups def __UpperCAmelCase ( self :Dict ) -> Any: '''simple docstring''' _a : Any =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Tuple =None if self.use_input_mask: _a : Tuple =random_attention_mask([self.batch_size, self.seq_length] ) _a : str =None _a : Tuple =None _a : Optional[int] =None if self.use_labels: _a : int =ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _a : Optional[int] =ids_tensor([self.batch_size] , self.num_choices ) _a : Dict =self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self :Optional[int] ) -> Any: '''simple docstring''' return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , 
num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any] ) -> List[Any]: '''simple docstring''' _a : List[Any] =SqueezeBertModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : Tuple =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[Any]: '''simple docstring''' _a : Union[str, Any] =SqueezeBertForMaskedLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : List[Any] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :List[Any] ) -> List[str]: '''simple docstring''' _a : Optional[int] =SqueezeBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : List[str] =model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> int: '''simple docstring''' _a : List[str] =self.num_labels _a : str =SqueezeBertForSequenceClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : Optional[int] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[int] ) -> int: '''simple docstring''' _a : Tuple =self.num_labels _a : Optional[int] 
=SqueezeBertForTokenClassification(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : str =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Tuple ) -> Optional[Any]: '''simple docstring''' _a : Union[str, Any] =self.num_choices _a : List[Any] =SqueezeBertForMultipleChoice(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _a : List[Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _a : List[Any] =model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCAmelCase ( self :Dict ) -> Tuple: '''simple docstring''' _a : Tuple =self.prepare_config_and_inputs() ((_a) , (_a) , (_a) , (_a) , (_a) , (_a)) : List[Any] =config_and_inputs _a : Dict ={"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class A__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : int = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) __UpperCamelCase : int = ( { "feature-extraction": SqueezeBertModel, "fill-mask": SqueezeBertForMaskedLM, "question-answering": SqueezeBertForQuestionAnswering, "text-classification": SqueezeBertForSequenceClassification, "token-classification": SqueezeBertForTokenClassification, "zero-shot": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase : Optional[int] = False __UpperCamelCase : Optional[int] = True __UpperCamelCase : str = False def __UpperCAmelCase ( self :Dict ) -> Tuple: '''simple docstring''' _a : Optional[int] =SqueezeBertModelTester(self ) _a : Union[str, Any] =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , dim=3_7 ) def __UpperCAmelCase ( self :Any ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' _a : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Any ) -> Union[str, Any]: '''simple docstring''' _a : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :List[str] ) -> Dict: '''simple docstring''' _a : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Tuple ) -> Any: '''simple docstring''' _a : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int ) -> Tuple: '''simple docstring''' _a : Dict =self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_squeezebert_for_token_classification(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Any ) -> Optional[Any]: '''simple docstring''' _a : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*SCREAMING_SNAKE_CASE ) @slow def __UpperCAmelCase ( self :Tuple ) -> str: '''simple docstring''' for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a : Tuple =SqueezeBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_sentencepiece @require_tokenizers @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Dict ) -> Any: '''simple docstring''' _a : Dict =SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" ) _a : Union[str, Any] =torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] ) _a : Any =model(SCREAMING_SNAKE_CASE )[0] _a : Optional[Any] =torch.Size((1, 3) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) _a : List[Any] =torch.tensor([[0.6_401, -0.0_349, -0.6_041]] ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
694
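The integration test above pins logits from the public squeezebert/squeezebert-mnli checkpoint. A hedged usage sketch along the same lines (the checkpoint name comes from the entry; the premise/hypothesis pair and the Auto classes are illustrative, and network access is assumed):

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
model = AutoModelForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

inputs = tokenizer(
    "A soccer game with multiple males playing.",
    "Some men are playing a sport.",
    return_tensors="pt",
)
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 3): MNLI label scores
print(logits.argmax(-1))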
'''simple docstring''' A__: Optional[int] = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' A__: List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] A__: int = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
694
1
'''simple docstring''' from __future__ import annotations import time import numpy as np A__: Optional[Any] = [8, 5, 9, 7] A__: int = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A__: Any = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class A__ : def __init__( self :int , SCREAMING_SNAKE_CASE :list[int] , SCREAMING_SNAKE_CASE :list[list[int]] , SCREAMING_SNAKE_CASE :list[list[int]] , ) -> None: '''simple docstring''' _a : str =claim_vector _a : Optional[int] =allocated_resources_table _a : Optional[int] =maximum_claim_table def __UpperCAmelCase ( self :Any ) -> list[int]: '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def __UpperCAmelCase ( self :int ) -> list[int]: '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def __UpperCAmelCase ( self :List[str] ) -> list[list[int]]: '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(SCREAMING_SNAKE_CASE ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def __UpperCAmelCase ( self :Optional[Any] ) -> dict[int, list[int]]: '''simple docstring''' return {self.__need().index(SCREAMING_SNAKE_CASE ): i for i in self.__need()} def __UpperCAmelCase ( self :Tuple , **SCREAMING_SNAKE_CASE :Tuple ) -> None: '''simple docstring''' _a : List[Any] =self.__need() _a : List[str] =self.__allocated_resources_table _a : Union[str, Any] =self.__available_resources() _a : Optional[Any] =self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("""_""" * 5_0 + """\n""" ) while need_list: _a : Any =False for each_need in need_list: _a : Tuple =True for index, need in enumerate(SCREAMING_SNAKE_CASE ): if need > available_resources[index]: _a : Tuple =False break if execution: _a : List[str] =True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: _a : str =original_need_index print(f"Process {process_number + 1} is executing." ) # remove the process run from stack need_list.remove(SCREAMING_SNAKE_CASE ) # update available/freed resources stack _a : Dict =np.array(SCREAMING_SNAKE_CASE ) + np.array( alloc_resources_table[process_number] ) print( """Updated available resource stack for processes: """ + """ """.join([str(SCREAMING_SNAKE_CASE ) for x in available_resources] ) ) break if safe: print("""The process is in a safe state.\n""" ) else: print("""System in unsafe state. Aborting...\n""" ) break def __UpperCAmelCase ( self :Dict ) -> Dict: '''simple docstring''' print(""" """ * 9 + """Allocated Resource Table""" ) for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(SCREAMING_SNAKE_CASE ) + 1}" + """ """.join(f"{it:>8}" for it in item ) + """\n""" ) print(""" """ * 9 + """System Resource Table""" ) for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(SCREAMING_SNAKE_CASE ) + 1}" + """ """.join(f"{it:>8}" for it in item ) + """\n""" ) print( """Current Usage by Active Processes: """ + """ """.join(str(SCREAMING_SNAKE_CASE ) for x in self.__claim_vector ) ) print( """Initial Available Resources: """ + """ """.join(str(SCREAMING_SNAKE_CASE ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
694
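The class above implements the Banker's algorithm: a state is safe if the processes can be ordered so that each one's remaining need fits within the currently available resources, with each finished process releasing its allocation. A compact sketch of just that safety check, run on the entry's sample data:

import numpy as np

def is_safe(available, allocated, maximum) -> bool:
    need = np.array(maximum) - np.array(allocated)
    work = np.array(available).copy()
    finished = [False] * len(need)
    progressed = True
    while progressed:
        progressed = False
        for i, done in enumerate(finished):
            if not done and all(need[i] <= work):
                work += np.array(allocated[i])  # process i completes, releasing its allocation
                finished[i] = True
                progressed = True
    return all(finished)

claim_vector = [8, 5, 9, 7]
allocated = [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]]
maximum = [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]
available = np.array(claim_vector) - np.array(allocated).sum(axis=0)
print(is_safe(available, allocated, maximum))  # True, matching the entry's "safe state" output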
'''simple docstring'''


def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(F"{price_plus_tax(100, 0.25) = }")
    print(F"{price_plus_tax(125.50, 0.05) = }")
694
1
'''simple docstring''' import numpy as np def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : np.ndarray ,_UpperCAmelCase : np.ndarray ,_UpperCAmelCase : float = 1e-12 ,_UpperCAmelCase : int = 100 ,) -> tuple[float, np.ndarray]: assert np.shape(_UpperCAmelCase )[0] == np.shape(_UpperCAmelCase )[1] # Ensure proper dimensionality. assert np.shape(_UpperCAmelCase )[0] == np.shape(_UpperCAmelCase )[0] # Ensure inputs are either both complex or both real assert np.iscomplexobj(_UpperCAmelCase ) == np.iscomplexobj(_UpperCAmelCase ) _a : List[Any] =np.iscomplexobj(_UpperCAmelCase ) if is_complex: # Ensure complex input_matrix is Hermitian assert np.array_equal(_UpperCAmelCase ,input_matrix.conj().T ) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. _a : Optional[Any] =False _a : int =0 _a : Any =0 _a : int =1e12 while not convergence: # Multiple matrix by the vector. _a : Dict =np.dot(_UpperCAmelCase ,_UpperCAmelCase ) # Normalize the resulting output vector. _a : str =w / np.linalg.norm(_UpperCAmelCase ) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) _a : Union[str, Any] =vector.conj().T if is_complex else vector.T _a : int =np.dot(_UpperCAmelCase ,np.dot(_UpperCAmelCase ,_UpperCAmelCase ) ) # Check convergence. _a : Optional[int] =np.abs(lambda_ - lambda_previous ) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: _a : Any =True _a : Optional[Any] =lambda_ if is_complex: _a : int =np.real(lambda_ ) return lambda_, vector def SCREAMING_SNAKE_CASE_ ( ) -> None: _a : Optional[int] =np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] ) _a : Dict =np.array([41, 4, 20] ) _a : Optional[Any] =real_input_matrix.astype(np.complexaaa ) _a : Optional[Any] =np.triu(1J * complex_input_matrix ,1 ) complex_input_matrix += imag_matrix complex_input_matrix += -1 * imag_matrix.T _a : Any =np.array([41, 4, 20] ).astype(np.complexaaa ) for problem_type in ["real", "complex"]: if problem_type == "real": _a : Tuple =real_input_matrix _a : List[str] =real_vector elif problem_type == "complex": _a : List[Any] =complex_input_matrix _a : List[Any] =complex_vector # Our implementation. _a , _a : Optional[int] =power_iteration(_UpperCAmelCase ,_UpperCAmelCase ) # Numpy implementation. # Get eigenvalues and eigenvectors using built-in numpy # eigh (eigh used for symmetric or hermetian matrices). _a , _a : List[Any] =np.linalg.eigh(_UpperCAmelCase ) # Last eigenvalue is the maximum one. _a : List[str] =eigen_values[-1] # Last column in this matrix is eigenvector corresponding to largest eigenvalue. _a : List[Any] =eigen_vectors[:, -1] # Check our implementation and numpy gives close answers. assert np.abs(eigen_value - eigen_value_max ) <= 1e-6 # Take absolute values element wise of each eigenvector. # as they are only unique to a minus sign. assert np.linalg.norm(np.abs(_UpperCAmelCase ) - np.abs(_UpperCAmelCase ) ) <= 1e-6 if __name__ == "__main__": import doctest doctest.testmod() test_power_iteration()
694
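The entry above runs power iteration with a Rayleigh-quotient convergence test. A compact sketch of the same loop on the entry's real test matrix; the normalization and quotient follow the entry, while the loop structure is simplified.

import numpy as np

def power_iteration(matrix: np.ndarray, vector: np.ndarray,
                    error_tol: float = 1e-12, max_iterations: int = 100):
    lambda_previous = 0.0
    for _ in range(max_iterations):
        w = matrix @ vector
        vector = w / np.linalg.norm(w)                 # keep the iterate normalized
        lambda_ = vector.conj().T @ (matrix @ vector)  # Rayleigh quotient of a unit vector
        if abs(lambda_ - lambda_previous) / abs(lambda_) <= error_tol:
            break
        lambda_previous = lambda_
    return lambda_, vector

matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]], dtype=float)
eigenvalue, eigenvector = power_iteration(matrix, np.array([41.0, 4.0, 20.0]))
print(np.isclose(eigenvalue, np.linalg.eigh(matrix)[0][-1]))  # dominant eigenvalue matches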
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class A__ ( unittest.TestCase ): def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple: '''simple docstring''' _a : Optional[Any] =parent _a : List[str] =batch_size _a : List[str] =seq_length _a : List[Any] =is_training _a : Optional[int] =use_attention_mask _a : List[Any] =use_token_type_ids _a : List[Any] =use_labels _a : Optional[Any] =vocab_size _a : str =hidden_size _a : List[Any] =num_hidden_layers _a : List[Any] =num_attention_heads _a : Union[str, Any] =intermediate_size _a : int =hidden_act _a : List[str] =hidden_dropout_prob _a : Optional[int] =attention_probs_dropout_prob _a : Dict =max_position_embeddings _a : Any =type_vocab_size _a : str =type_sequence_label_size _a : str =initializer_range _a : List[str] =num_choices def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict: '''simple docstring''' _a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Dict =None if self.use_attention_mask: _a : Any =random_attention_mask([self.batch_size, self.seq_length] ) _a : Optional[int] =None if self.use_token_type_ids: _a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a : Union[str, Any] =RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' _a : Tuple =self.prepare_config_and_inputs() _a , _a , _a , _a : List[Any] =config_and_inputs _a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def 
__UpperCAmelCase ( self :int ) -> str: '''simple docstring''' _a : List[Any] =self.prepare_config_and_inputs() _a , _a , _a , _a : Optional[int] =config_and_inputs _a : Tuple =True _a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : Union[str, Any] = True __UpperCamelCase : Dict = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self :List[str] ) -> Optional[int]: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self ) @slow def __UpperCAmelCase ( self :str ) -> int: '''simple docstring''' for model_class_name in self.all_model_classes: _a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Dict =model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_flax class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Any ) -> str: '''simple docstring''' _a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Dict =model(SCREAMING_SNAKE_CASE )[0] _a : List[Any] =[1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. _a : Any =np.array( [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def __UpperCAmelCase ( self :int ) -> int: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. _a : str =np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
694
1
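The slow tests above load the public andreasmadsen/efficient_mlm_m0.40 checkpoint with from_pt=True. A minimal usage sketch, assuming flax is installed and the checkpoint is reachable:

import numpy as np
from transformers import FlaxRobertaPreLayerNormModel

model = FlaxRobertaPreLayerNormModel.from_pretrained(
    "andreasmadsen/efficient_mlm_m0.40", from_pt=True
)
outputs = model(np.ones((1, 11), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 11, hidden_size)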
'''simple docstring''' import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) A__: Dict = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> List[Any]: _a : List[str] =git.Repo(search_parent_directories=_UpperCAmelCase ) _a : Any ={ """repo_id""": str(_UpperCAmelCase ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), } with open(os.path.join(_UpperCAmelCase ,"""git_log.json""" ) ,"""w""" ) as f: json.dump(_UpperCAmelCase ,_UpperCAmelCase ,indent=4 ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ) -> Optional[int]: if params.n_gpu <= 0: _a : Optional[int] =0 _a : List[Any] =-1 _a : Optional[Any] =True _a : Union[str, Any] =False return assert torch.cuda.is_available() logger.info("""Initializing GPUs""" ) if params.n_gpu > 1: assert params.local_rank != -1 _a : List[str] =int(os.environ["""WORLD_SIZE"""] ) _a : List[Any] =int(os.environ["""N_GPU_NODE"""] ) _a : List[str] =int(os.environ["""RANK"""] ) # number of nodes / node ID _a : Union[str, Any] =params.world_size // params.n_gpu_per_node _a : List[str] =params.global_rank // params.n_gpu_per_node _a : Optional[int] =True assert params.n_nodes == int(os.environ["""N_NODES"""] ) assert params.node_id == int(os.environ["""NODE_RANK"""] ) # local job (single GPU) else: assert params.local_rank == -1 _a : List[str] =1 _a : Optional[int] =0 _a : Any =0 _a : List[Any] =0 _a : str =1 _a : List[str] =1 _a : int =False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode _a : int =params.node_id == 0 and params.local_rank == 0 _a : Tuple =params.n_nodes > 1 # summary _a : str =F"--- Global rank: {params.global_rank} - " logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes ) logger.info(PREFIX + """Node ID : %i""" % params.node_id ) logger.info(PREFIX + """Local rank : %i""" % params.local_rank ) logger.info(PREFIX + """World size : %i""" % params.world_size ) logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node ) logger.info(PREFIX + """Master : %s""" % str(params.is_master ) ) logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) ) logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) ) logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("""Initializing PyTorch distributed""" ) torch.distributed.init_process_group( init_method="""env://""" ,backend="""nccl""" ,) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> Optional[Any]: np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
694
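The helper above derives rank and world-size bookkeeping from environment variables and finishes with torch.cuda.set_device plus an env:// init_process_group. A minimal sketch of that initialization; note LOCAL_RANK is the torchrun convention, whereas the entry reads RANK, WORLD_SIZE, and node-level variables explicitly.

import os
import torch
import torch.distributed as dist

def init_distributed() -> None:
    # LOCAL_RANK is set by torchrun; the entry above uses RANK/WORLD_SIZE/N_GPU_NODE instead.
    local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if local_rank == -1 or not torch.cuda.is_available():
        return  # single-process job: nothing to initialize
    torch.cuda.set_device(local_rank)
    dist.init_process_group(init_method="env://", backend="nccl")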
'''simple docstring''' import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A__: Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : str=None ) -> Union[str, Any]: return field(default_factory=lambda: default ,metadata=_UpperCAmelCase ) @dataclass class A__ : __UpperCamelCase : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __UpperCamelCase : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __UpperCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __UpperCamelCase : str = field( default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , ) __UpperCamelCase : str = field( default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __UpperCamelCase : str = field( default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , ) __UpperCamelCase : str = field( default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , ) __UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' warnings.warn( f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , SCREAMING_SNAKE_CASE , ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[str]: '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
694
1
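The dataclass above, whose post-init warning marks these benchmarking utils as deprecated, is meant to be filled from the command line. A hedged sketch of how such arguments are typically parsed with HfArgumentParser, using the concrete PyTorchBenchmarkArguments subclass from transformers (run from a CLI):

from transformers import HfArgumentParser, PyTorchBenchmarkArguments

parser = HfArgumentParser(PyTorchBenchmarkArguments)
benchmark_args = parser.parse_args_into_dataclasses()[0]
print(benchmark_args.batch_sizes, benchmark_args.sequence_lengths)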
'''simple docstring'''

from math import isqrt


def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(F"{solution() = }")
694
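The entry above counts primes below 10**6 of the form 3n^2 + 3n + 1, i.e. differences of consecutive cubes (the "cuban primes"); the step prime_candidate += 6 * cube_index advances between successive differences. A brief worked check of the first few candidates:

from math import isqrt

def is_prime(number: int) -> bool:
    return number > 1 and all(number % d != 0 for d in range(2, isqrt(number) + 1))

# Consecutive-cube differences: 2**3 - 1**3 = 7, 3**3 - 2**3 = 19, ...
candidates = [(n + 1) ** 3 - n**3 for n in range(1, 6)]
print(candidates)                              # [7, 19, 37, 61, 91]
print([c for c in candidates if is_prime(c)])  # 91 = 7 * 13 drops out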
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class A__ ( UpperCAmelCase__ ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Distribution , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :List[Any]=0 ) -> List[str]: '''simple docstring''' _a : int =1.0 if scale is None else scale _a : Optional[Any] =0.0 if loc is None else loc super().__init__(SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE )] ) @property def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return self.base_dist.mean * self.scale + self.loc @property def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return self.base_dist.variance * self.scale**2 @property def __UpperCAmelCase ( self :Any ) -> List[str]: '''simple docstring''' return self.variance.sqrt() class A__ ( nn.Module ): def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Callable[..., Tuple[torch.Tensor]] , **SCREAMING_SNAKE_CASE :Dict ) -> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) _a : Tuple =args_dim _a : Tuple =nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] ) _a : Dict =domain_map def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Tuple[torch.Tensor]: '''simple docstring''' _a : Tuple =[proj(SCREAMING_SNAKE_CASE ) for proj in self.proj] return self.domain_map(*SCREAMING_SNAKE_CASE ) class A__ ( nn.Module ): def __init__( self :Dict , SCREAMING_SNAKE_CASE :Tuple ) -> int: '''simple docstring''' super().__init__() _a : List[Any] =function def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[int] , *SCREAMING_SNAKE_CASE :int ) -> List[Any]: '''simple docstring''' return self.function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) class A__ : __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__( self :Any , SCREAMING_SNAKE_CASE :int = 1 ) -> None: '''simple docstring''' _a : Any =dim _a : List[Any] ={k: dim * self.args_dim[k] for k in self.args_dim} def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]: '''simple docstring''' if self.dim == 1: return self.distribution_class(*SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , ) -> Distribution: '''simple docstring''' _a : str =self._base_distribution(SCREAMING_SNAKE_CASE ) if loc is None and scale is None: return distr else: return AffineTransformed(SCREAMING_SNAKE_CASE , loc=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , event_dim=self.event_dim ) @property def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' return () if self.dim == 1 else (self.dim,) @property def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return len(self.event_shape ) @property def __UpperCAmelCase ( self :Any ) -> float: '''simple docstring''' return 0.0 def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> 
nn.Module: '''simple docstring''' return ParameterProjection( in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def __UpperCAmelCase ( self :int , *SCREAMING_SNAKE_CASE :torch.Tensor ) -> Any: '''simple docstring''' raise NotImplementedError() @staticmethod def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor: '''simple docstring''' return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0 class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def __UpperCAmelCase ( cls :int , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Union[str, Any]: '''simple docstring''' _a : Tuple =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) _a : Optional[Any] =2.0 + cls.squareplus(SCREAMING_SNAKE_CASE ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Dict: '''simple docstring''' _a : List[str] =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Optional[int]: '''simple docstring''' _a : int =cls.squareplus(SCREAMING_SNAKE_CASE ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Distribution: '''simple docstring''' _a , _a : Any =distr_args if self.dim == 1: return self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None ) -> Distribution: '''simple docstring''' _a , _a : Optional[int] =distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
694
1
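The squareplus map used above, (x + sqrt(x^2 + 4)) / 2, sends any real input to a strictly positive output using only algebraic operations, which is why the distribution heads apply it (followed by an epsilon clamp) to scale and df parameters. A tiny numeric sketch:

import torch
import torch.nn.functional as F

def squareplus(x: torch.Tensor) -> torch.Tensor:
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

x = torch.tensor([-5.0, -1.0, 0.0, 1.0, 5.0])
print(squareplus(x))  # strictly positive everywhere; squareplus(0) == 1
print(F.softplus(x))  # a common alternative positivity map, for comparison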
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class A__ ( unittest.TestCase ): def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' _a : Union[str, Any] =0 @slow def __UpperCAmelCase ( self :Any ) -> Optional[int]: '''simple docstring''' for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): _a : str =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(SCREAMING_SNAKE_CASE ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): _a : str =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(SCREAMING_SNAKE_CASE ) , 0 ) def __UpperCAmelCase ( self :Any ) -> Tuple: '''simple docstring''' _a : Dict =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def __UpperCAmelCase ( self :int ) -> List[str]: '''simple docstring''' _a : Union[str, Any] =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 2_0 ) def __UpperCAmelCase ( self :Optional[int] ) -> int: '''simple docstring''' _a : Dict =AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Check that tokenizer_type ≠ model_type _a : List[str] =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(SCREAMING_SNAKE_CASE , """vocab.txt""" ) ) _a : Optional[int] =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , tokenizer_type="""bert""" , use_fast=SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with 
tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(SCREAMING_SNAKE_CASE , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(SCREAMING_SNAKE_CASE , """merges.txt""" ) ) _a : int =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , tokenizer_type="""gpt2""" , use_fast=SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @require_tokenizers def __UpperCAmelCase ( self :Tuple ) -> Dict: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(SCREAMING_SNAKE_CASE , """vocab.txt""" ) ) _a : int =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , tokenizer_type="""bert""" ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(SCREAMING_SNAKE_CASE , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(SCREAMING_SNAKE_CASE , """merges.txt""" ) ) _a : Optional[Any] =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , tokenizer_type="""gpt2""" ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Any ) -> Optional[Any]: '''simple docstring''' with pytest.raises(SCREAMING_SNAKE_CASE ): AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" ) @require_tokenizers def __UpperCAmelCase ( self :Tuple ) -> List[Any]: '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: _a : Union[str, Any] =tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" ) self.assertIsInstance(SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , SCREAMING_SNAKE_CASE ) else: self.assertEqual(tokenizer.do_lower_case , SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) @require_tokenizers def __UpperCAmelCase ( self :str ) -> int: '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ): _a : Optional[Any] =tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" ) def __UpperCAmelCase ( self :Tuple ) -> Dict: '''simple docstring''' # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. 
models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai _a : Any =TOKENIZER_MAPPING.values() _a : Any =[] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(SCREAMING_SNAKE_CASE ) @require_tokenizers def __UpperCAmelCase ( self :str ) -> Optional[int]: '''simple docstring''' self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , SCREAMING_SNAKE_CASE ) @require_tokenizers def __UpperCAmelCase ( self :str ) -> Optional[Any]: '''simple docstring''' _a : Tuple =AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=SCREAMING_SNAKE_CASE ) _a : Optional[int] ="""Hello, world. How are you?""" _a : Any =tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertEqual("""[UNK]""" , tokens[0] ) _a : Dict =AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=SCREAMING_SNAKE_CASE ) _a : List[Any] =tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertEqual("""[UNK]""" , tokens[0] ) @require_tokenizers def __UpperCAmelCase ( self :List[Any] ) -> int: '''simple docstring''' _a : List[str] =AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" ) self.assertEqual(type(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 ) self.assertEqual(tokenizer.unk_token , """[UNK]""" ) self.assertEqual(tokenizer.padding_side , """right""" ) self.assertEqual(tokenizer.truncation_side , """right""" ) def __UpperCAmelCase ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' _a : List[str] =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(SCREAMING_SNAKE_CASE ) _a : Any =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 1_2 ) def __UpperCAmelCase ( self :Any ) -> List[str]: '''simple docstring''' _a : List[str] =AutoTokenizer.from_pretrained("""ctrl""" ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' # Check we can load the tokenizer config of an online model. _a : Any =get_tokenizer_config("""bert-base-cased""" ) _a : Dict =config.pop("""_commit_hash""" , SCREAMING_SNAKE_CASE ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(SCREAMING_SNAKE_CASE , {"""do_lower_case""": False} ) # This model does not have a tokenizer_config so we get back an empty dict. _a : Optional[Any] =get_tokenizer_config(SCREAMING_SNAKE_CASE ) self.assertDictEqual(SCREAMING_SNAKE_CASE , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. 
_a : str =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(SCREAMING_SNAKE_CASE ) _a : Tuple =get_tokenizer_config(SCREAMING_SNAKE_CASE ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" ) def __UpperCAmelCase ( self :Any ) -> Tuple: '''simple docstring''' try: AutoConfig.register("""custom""" , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) _a : Optional[Any] =CustomTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(SCREAMING_SNAKE_CASE ) _a : List[Any] =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def __UpperCAmelCase ( self :Union[str, Any] ) -> Any: '''simple docstring''' try: AutoConfig.register("""custom""" , SCREAMING_SNAKE_CASE ) # Can register in two steps AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , fast_tokenizer_class=SCREAMING_SNAKE_CASE ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE , fast_tokenizer_class=SCREAMING_SNAKE_CASE ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoTokenizer.register(SCREAMING_SNAKE_CASE , fast_tokenizer_class=SCREAMING_SNAKE_CASE ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: _a : str =BertTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE ) bert_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE ) _a : Any =CustomTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(SCREAMING_SNAKE_CASE ) _a : str =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Any =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def __UpperCAmelCase ( self :Union[str, Any] ) -> Any: '''simple docstring''' # If remote code is not set, we will time out when asking whether to load the model. 
with self.assertRaises(SCREAMING_SNAKE_CASE ): _a : str =AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(SCREAMING_SNAKE_CASE ): _a : str =AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=SCREAMING_SNAKE_CASE ) _a : str =AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(SCREAMING_SNAKE_CASE ) _a : Tuple =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version _a : int =AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(SCREAMING_SNAKE_CASE ) _a : List[str] =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) @require_tokenizers def __UpperCAmelCase ( self :Optional[Any] ) -> List[Any]: '''simple docstring''' class A__ ( UpperCAmelCase__ ): __UpperCamelCase : int = False class A__ ( UpperCAmelCase__ ): __UpperCamelCase : List[Any] = NewTokenizer __UpperCamelCase : Union[str, Any] = False try: AutoConfig.register("""custom""" , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , fast_tokenizer_class=SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local _a : List[str] =AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) _a : Union[str, Any] =AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
_a : Optional[Any] =AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) _a : int =AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub _a : Tuple =AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertTrue(tokenizer.special_attribute_present ) _a : Union[str, Any] =AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' _a : Tuple =AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version _a : Optional[Any] =AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def __UpperCAmelCase ( self :Union[str, Any] ) -> Tuple: '''simple docstring''' with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , """bert-base is not a local folder and is not a valid model identifier""" ): _a : List[Any] =AutoTokenizer.from_pretrained("""bert-base""" ) def __UpperCAmelCase ( self :Optional[Any] ) -> Dict: '''simple docstring''' with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): _a : Optional[Any] =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , revision="""aaaaaa""" ) def __UpperCAmelCase ( self :str ) -> Dict: '''simple docstring''' # Make sure we have cached the tokenizer. _a : Optional[int] =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) with RequestCounter() as counter: _a : List[Any] =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
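# The test suite above exercises AutoTokenizer's resolution rules: config-driven lookup,
# explicit tokenizer_type overrides, custom slow/fast registration, and trust_remote_code.
# A minimal usage sketch, assuming network access to the Hugging Face Hub; the checkpoint
# name is only an example:
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
print(type(tokenizer).__name__)  # BertTokenizerFast when the tokenizers library is installed

# use_fast=False forces the pure-Python (slow) implementation instead:
slow_tokenizer = AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False)
print(type(slow_tokenizer).__name__)  # BertTokenizer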
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Toggle the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is set."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
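# Self-check of the bit helpers above, using number = 0b1101 (13) as an arbitrary example
# (standalone snippet, meant to run after the definitions):
assert set_bit(0b1101, 1) == 0b1111  # 15
assert clear_bit(0b1101, 2) == 0b1001  # 9
assert flip_bit(0b1101, 0) == 0b1100  # 12
assert is_bit_set(0b1101, 3) is True
assert get_bit(0b1101, 1) == 0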
from __future__ import annotations

from math import sqrt

import numpy as np
from sympy import symbols

# Speed of light (m/s)
c = 299_792_458

# Symbolic components of a four-vector (ct, x, y, z)
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Return v/c after validating that 1 <= velocity <= c."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - (v/c)^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Boost `event`; default to the symbolic four-vector (ct, x, y, z)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four-vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of a symbolic four-vector boosted at v = 29,979,245 m/s (0.1c):
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
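# Numeric sanity check for the helpers above (standalone snippet, meant to run after the
# definitions): at half the speed of light, beta = 0.5 and gamma = 1/sqrt(0.75) ~= 1.1547.
from math import isclose

v = 0.5 * c
assert isclose(beta(v), 0.5)
assert isclose(gamma(v), 1 / sqrt(0.75))

# Boosting a concrete event (t = 1 s at the spatial origin): the transformed components
# follow ct' = gamma * c * t and x' = -gamma * beta * c * t.
event = np.array([1.0, 0.0, 0.0, 0.0])
boosted = transform(v, event)
assert isclose(boosted[0], gamma(v) * c)
assert isclose(boosted[1], -gamma(v) * beta(v) * c)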
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (L1) distance between two n-dimensional points."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    """Raise TypeError/ValueError unless `point` is a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """One-liner variant of `manhattan_distance`."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
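# Usage example for the distance helpers above: |1-4| + |4-0| + |5-3| = 9, and the
# validation rejects dimension mismatches (standalone snippet):
assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance_one_liner([1, 4, 5], [4, 0, 3]) == 9.0
try:
    manhattan_distance([1, 1], [2, 2, 2])
except ValueError as err:
    print(err)  # Both points must be in the same n-dimensional space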
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class A__ : @staticmethod def __UpperCAmelCase ( *SCREAMING_SNAKE_CASE :str , **SCREAMING_SNAKE_CASE :Optional[Any] ) -> List[Any]: '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class A__ ( unittest.TestCase ): __UpperCamelCase : Tuple = MODEL_FOR_OBJECT_DETECTION_MAPPING def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any ) -> Optional[Any]: '''simple docstring''' _a : Optional[int] =ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _a : int =object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 ) self.assertGreater(len(SCREAMING_SNAKE_CASE ) , 0 ) for detected_object in outputs: self.assertEqual( SCREAMING_SNAKE_CASE , { """score""": ANY(SCREAMING_SNAKE_CASE ), """label""": ANY(SCREAMING_SNAKE_CASE ), """box""": {"""xmin""": ANY(SCREAMING_SNAKE_CASE ), """ymin""": ANY(SCREAMING_SNAKE_CASE ), """xmax""": ANY(SCREAMING_SNAKE_CASE ), """ymax""": ANY(SCREAMING_SNAKE_CASE )}, } , ) import datasets _a : List[str] =datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) _a : Union[str, Any] =[ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] _a : int =object_detector(SCREAMING_SNAKE_CASE , threshold=0.0 ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) ) for outputs in batch_outputs: self.assertGreater(len(SCREAMING_SNAKE_CASE ) , 0 ) for detected_object in outputs: self.assertEqual( SCREAMING_SNAKE_CASE , { """score""": ANY(SCREAMING_SNAKE_CASE ), """label""": ANY(SCREAMING_SNAKE_CASE ), """box""": {"""xmin""": ANY(SCREAMING_SNAKE_CASE ), """ymin""": ANY(SCREAMING_SNAKE_CASE ), """xmax""": ANY(SCREAMING_SNAKE_CASE ), """ymax""": ANY(SCREAMING_SNAKE_CASE )}, } , ) @require_tf @unittest.skip("""Object detection not implemented in TF""" ) def __UpperCAmelCase ( self :int ) -> int: '''simple docstring''' pass @require_torch def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' _a : Optional[int] ="""hf-internal-testing/tiny-detr-mobilenetsv3""" _a : Any =AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE ) _a : Any =AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) _a : List[Any] =ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE ) _a : Optional[int] =object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"""score""": 0.3_376, 
"""label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, ] , ) _a : Optional[Any] =object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, ], [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, ], ] , ) @require_torch @slow def __UpperCAmelCase ( self :Dict ) -> Optional[Any]: '''simple docstring''' _a : Optional[int] ="""facebook/detr-resnet-50""" _a : Dict =AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE ) _a : int =AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) _a : Any =ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE ) _a : str =object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ] , ) _a : Dict =object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ], [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, 
"""label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ], ] , ) @require_torch @slow def __UpperCAmelCase ( self :str ) -> List[str]: '''simple docstring''' _a : Any ="""facebook/detr-resnet-50""" _a : Union[str, Any] =pipeline("""object-detection""" , model=SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ] , ) _a : int =object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ], [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ], ] , ) @require_torch @slow def __UpperCAmelCase ( self :Any ) -> Union[str, Any]: '''simple docstring''' _a : List[str] =0.9_985 _a : int ="""facebook/detr-resnet-50""" _a : str =pipeline("""object-detection""" , model=SCREAMING_SNAKE_CASE ) _a : Optional[int] =object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=SCREAMING_SNAKE_CASE ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"""score""": 0.9_988, """label""": """cat""", """box""": 
{"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ] , ) @require_torch @require_pytesseract @slow def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict: '''simple docstring''' _a : List[str] ="""Narsil/layoutlmv3-finetuned-funsd""" _a : Tuple =0.9_993 _a : int =pipeline("""object-detection""" , model=SCREAMING_SNAKE_CASE , threshold=SCREAMING_SNAKE_CASE ) _a : List[str] =object_detector( """https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_9_4, """ymin""": 2_5_4, """xmax""": 3_4_3, """ymax""": 2_6_4}}, {"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_9_4, """ymin""": 2_5_4, """xmax""": 3_4_3, """ymax""": 2_6_4}}, ] , )
from __future__ import annotations


class IIRFilter:
    """N-order direct-form-I infinite impulse response (IIR) filter."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the feedback (a) and feedforward (b) coefficients.

        `a_coeffs` may omit the leading a_0, which then defaults to 1.0.
        """
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Filter one sample and return the filter output."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # Shift the histories right by one and store the newest sample/output.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result

        return result
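# Usage sketch for IIRFilter above: a first-order exponential-smoothing low-pass,
#   y[n] = 0.1 * x[n] + 0.9 * y[n-1],  i.e.  a = [1.0, -0.9] and b = [0.1, 0.0].
# Fed a unit step, the output approaches 1.0 (it equals 1 - 0.9**n after n samples):
lowpass = IIRFilter(1)
lowpass.set_coefficients([1.0, -0.9], [0.1, 0.0])

out = 0.0
for _ in range(50):
    out = lowpass.process(1.0)
print(round(out, 3))  # ~0.995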
from __future__ import annotations

from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    """Relax every edge out of `v` for one direction of the bidirectional search."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # If the opposite search already settled `nxt`, the two frontiers meet here.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """Bidirectional Dijkstra: search forward from `source` and backward from
    `destination`, stopping once the frontiers can no longer improve the best
    meeting cost. Returns -1 if no path exists."""
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Stop once the two frontiers can no longer improve the best meeting cost.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
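# Example query on the demo graphs above: the cheapest E -> F route is E -> G -> F with
# total cost 3 (vs. E -> B -> C -> D -> F, which costs 4). `graph_bwd` holds the reversed
# edges walked by the backward half of the search.
assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3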
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    """Print a path through `maze` from the top-left to the bottom-right corner,
    if one exists, and return whether the maze was solved."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Backtracking step: try to extend the path through cell (i, j)."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
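# Example run: 0 marks a free cell and 1 a blocked one; the printed solution marks the
# path found from the top-left to the bottom-right corner.
demo_maze = [
    [0, 1, 0],
    [0, 0, 0],
    [1, 0, 0],
]
solve_maze(demo_maze)
# Expected output (the depth-first search tries "down" before "right"):
# [1, 0, 0]
# [1, 1, 0]
# [0, 1, 1]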
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
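# Worked example: 10! = 3,628,800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27; the default
# argument of 100 matches the Project Euler problem statement.
assert solution(10) == 27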
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the twentieth
    century, 1 Jan 1901 to 31 Dec 2000 (Project Euler problem 19)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was the first Sunday of the century
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7  # jump from Sunday to Sunday

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # Leap year: February has 29 days.
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1

    return sundays


if __name__ == "__main__":
    print(solution())
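# A cross-check of the hand-rolled calendar arithmetic above against the standard
# library (date.weekday() == 6 means Sunday); both should count 171 first-of-month Sundays:
from datetime import date

reference = sum(
    date(year, month, 1).weekday() == 6
    for year in range(1901, 2001)
    for month in range(1, 13)
)
assert solution() == reference == 171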
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return the start index of every occurrence of `pattern` in `s`."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
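# The scan above is O(len(s) * len(pattern)) in the worst case. A cross-check against
# repeated str.find calls, which locate the same (possibly overlapping) occurrences:
text, pat = "ABAAABCDBBABCDDEBCABC", "ABC"
expected = []
start = text.find(pat)
while start != -1:
    expected.append(start)
    start = text.find(pat, start + 1)
assert naive_pattern_search(text, pat) == expected  # [4, 10, 18]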
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class A__ ( unittest.TestCase ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=1_8 , SCREAMING_SNAKE_CASE :Any=3_0 , SCREAMING_SNAKE_CASE :List[str]=4_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , ) -> Tuple: '''simple docstring''' _a : int =size if size is not None else {"""height""": 1_8, """width""": 1_8} _a : int =parent _a : Optional[int] =batch_size _a : List[str] =num_channels _a : Optional[Any] =image_size _a : int =min_resolution _a : str =max_resolution _a : str =do_resize _a : Tuple =size _a : Tuple =do_normalize def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804], [-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self :List[Any] ) -> List[Any]: '''simple docstring''' _a : Any =ImageGPTImageProcessingTester(self ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self :Dict ) -> Any: '''simple docstring''' _a : Optional[Any] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) ) def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]: '''simple docstring''' _a : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} ) _a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} ) def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) _a : Dict =json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) ) else: self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' _a : List[Any] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : Any 
=os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" ) image_processor_first.to_json_file(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict() _a : Tuple =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> str: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict() _a : Union[str, Any] =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: _a : Any =load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" ) _a : Dict =Image.open(dataset[4]["""file"""] ) _a : Optional[int] =Image.open(dataset[5]["""file"""] ) _a : Optional[Any] =[imagea, imagea] return images @require_vision @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : Optional[Any] =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) _a : int =prepare_images() # test non-batched _a : Dict =image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) ) _a : Optional[int] =[3_0_6, 1_9_1, 1_9_1] self.assertEqual(encoding.input_ids[0, :3].tolist() , SCREAMING_SNAKE_CASE ) # test batched _a : Dict =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) ) _a : Any =[3_0_3, 1_3, 1_3] self.assertEqual(encoding.input_ids[1, -3:].tolist() , SCREAMING_SNAKE_CASE )
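# The tests above verify that ImageGPTImageProcessor serializes its color "clusters" and
# encodes images as sequences of cluster indices rather than pixel tensors. A minimal
# usage sketch, assuming Hub access; the image file name is illustrative:
from PIL import Image
from transformers import ImageGPTImageProcessor

processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
encoding = processor(Image.open("example.png").convert("RGB"), return_tensors="pt")
print(encoding.input_ids.shape)  # torch.Size([1, 1024]): one cluster id per pixel after the 32x32 resize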
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of `arr` sums to exactly `required_sum`.

    Classic O(len(arr) * required_sum) dynamic-programming table:
    subset[i][j] is True when a subset of the first i elements sums to j.
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
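# Usage example: {4, 5} sums to 9, while no subset of the array reaches exactly 30
# (34 alone overshoots, and the rest sum to at most 3 + 4 + 12 + 5 + 2 = 26).
assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False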
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig A__: Dict = [ '''openmmlab/upernet-convnext-tiny''', # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring A__: Tuple = '''UperNetConfig''' class A__ ( nn.Module ): def __init__( self :Optional[Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Union[int, Tuple[int, int]] , SCREAMING_SNAKE_CASE :Union[int, Tuple[int, int], str] = 0 , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :Union[int, Tuple[int, int]] = 1 , ) -> None: '''simple docstring''' super().__init__() _a : int =nn.Convad( in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , kernel_size=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE , dilation=SCREAMING_SNAKE_CASE , ) _a : str =nn.BatchNormad(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =nn.ReLU() def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor: '''simple docstring''' _a : List[str] =self.conv(SCREAMING_SNAKE_CASE ) _a : int =self.batch_norm(SCREAMING_SNAKE_CASE ) _a : Optional[int] =self.activation(SCREAMING_SNAKE_CASE ) return output class A__ ( nn.Module ): def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int ) -> None: '''simple docstring''' super().__init__() _a : str =[ nn.AdaptiveAvgPoolad(SCREAMING_SNAKE_CASE ), UperNetConvModule(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor: '''simple docstring''' _a : str =input for layer in self.layers: _a : List[str] =layer(SCREAMING_SNAKE_CASE ) return hidden_state class A__ ( nn.Module ): def __init__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Tuple[int, ...] 
, SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :bool ) -> None: '''simple docstring''' super().__init__() _a : Tuple =pool_scales _a : Any =align_corners _a : Any =in_channels _a : List[Any] =channels _a : List[Any] =[] for i, pool_scale in enumerate(SCREAMING_SNAKE_CASE ): _a : str =UperNetPyramidPoolingBlock(pool_scale=SCREAMING_SNAKE_CASE , in_channels=SCREAMING_SNAKE_CASE , channels=SCREAMING_SNAKE_CASE ) self.blocks.append(SCREAMING_SNAKE_CASE ) self.add_module(str(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :torch.Tensor ) -> List[torch.Tensor]: '''simple docstring''' _a : Optional[int] =[] for ppm in self.blocks: _a : Optional[Any] =ppm(SCREAMING_SNAKE_CASE ) _a : Dict =nn.functional.interpolate( SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners ) ppm_outs.append(SCREAMING_SNAKE_CASE ) return ppm_outs class A__ ( nn.Module ): def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any] ) -> List[str]: '''simple docstring''' super().__init__() _a : int =config _a : int =config.pool_scales # e.g. (1, 2, 3, 6) _a : Tuple =in_channels _a : List[str] =config.hidden_size _a : Dict =False _a : Optional[Any] =nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module _a : Dict =UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) _a : Union[str, Any] =UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module _a : int =nn.ModuleList() _a : Optional[Any] =nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer _a : str =UperNetConvModule(SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 ) _a : str =UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(SCREAMING_SNAKE_CASE ) self.fpn_convs.append(SCREAMING_SNAKE_CASE ) _a : Tuple =UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def __UpperCAmelCase ( self :List[Any] ) -> List[Any]: '''simple docstring''' self.apply(self._init_weights ) def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :List[Any] ) -> Dict: '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]: '''simple docstring''' _a : Optional[int] =inputs[-1] _a : Tuple =[x] psp_outs.extend(self.psp_modules(SCREAMING_SNAKE_CASE ) ) _a : List[str] =torch.cat(SCREAMING_SNAKE_CASE , dim=1 ) _a : Any =self.bottleneck(SCREAMING_SNAKE_CASE ) return output def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor: '''simple docstring''' # build laterals _a : Optional[int] =[lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(SCREAMING_SNAKE_CASE ) ) # build top-down path _a : str =len(SCREAMING_SNAKE_CASE ) for i in range(used_backbone_levels - 1 , 0 , -1 ): _a : Any =laterals[i - 1].shape[2:] _a : Dict =laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=SCREAMING_SNAKE_CASE , mode="""bilinear""" , align_corners=self.align_corners ) # build 
outputs _a : List[str] =[self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): _a : str =nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners ) _a : str =torch.cat(SCREAMING_SNAKE_CASE , dim=1 ) _a : List[Any] =self.fpn_bottleneck(SCREAMING_SNAKE_CASE ) _a : int =self.classifier(SCREAMING_SNAKE_CASE ) return output class A__ ( nn.Module ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int = 2 , SCREAMING_SNAKE_CASE :int = 3 , SCREAMING_SNAKE_CASE :Union[int, Tuple[int, int]] = 1 ) -> None: '''simple docstring''' super().__init__() _a : Tuple =config _a : Optional[Any] =config.auxiliary_in_channels _a : int =config.auxiliary_channels _a : Optional[int] =config.auxiliary_num_convs _a : Union[str, Any] =config.auxiliary_concat_input _a : List[Any] =in_index _a : Tuple =(kernel_size // 2) * dilation _a : List[Any] =[] convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , dilation=SCREAMING_SNAKE_CASE ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , dilation=SCREAMING_SNAKE_CASE ) ) if self.num_convs == 0: _a : Optional[Any] =nn.Identity() else: _a : Any =nn.Sequential(*SCREAMING_SNAKE_CASE ) if self.concat_input: _a : List[str] =UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE , padding=kernel_size // 2 ) _a : Optional[int] =nn.Convad(self.channels , config.num_labels , kernel_size=1 ) def __UpperCAmelCase ( self :Optional[Any] ) -> Any: '''simple docstring''' self.apply(self._init_weights ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :List[Any] ) -> int: '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor: '''simple docstring''' # just take the relevant feature maps _a : str =encoder_hidden_states[self.in_index] _a : Tuple =self.convs(SCREAMING_SNAKE_CASE ) if self.concat_input: _a : Dict =self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) _a : Optional[Any] =self.classifier(SCREAMING_SNAKE_CASE ) return output class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Union[str, Any] = UperNetConfig __UpperCamelCase : int = "pixel_values" __UpperCamelCase : List[str] = True def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :int ) -> Optional[Any]: '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def __UpperCAmelCase ( self :int ) -> Any: '''simple docstring''' self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :int=False ) -> Optional[Any]: '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _a : str =value A__: str = R''' Parameters: This model is a PyTorch 
[torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a
    regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`UperNetConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''

UPERNET_INPUTS_DOCSTRING = R'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''


@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
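# A minimal inference sketch for the model above; the checkpoint name is an
# assumption (any UperNet checkpoint on the Hub with a matching image
# processor should behave the same way):
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    inputs = processor(images=Image.open("scene.jpg"), return_tensors="pt")  # "scene.jpg" is a placeholder
    logits = model(**inputs).logits  # (batch_size, num_labels, height, width)
    print(logits.shape)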
'''simple docstring'''


def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
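# Hedged sanity checks, computed by hand: the even Fibonacci numbers not
# exceeding 10 are 2 and 8, and the classic 4,000,000 limit gives 4613732.
#     solution(10)  ->  10
#     solution()    ->  4613732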
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class A__ ( UpperCAmelCase__ ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Distribution , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :List[Any]=0 ) -> List[str]: '''simple docstring''' _a : int =1.0 if scale is None else scale _a : Optional[Any] =0.0 if loc is None else loc super().__init__(SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE )] ) @property def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return self.base_dist.mean * self.scale + self.loc @property def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return self.base_dist.variance * self.scale**2 @property def __UpperCAmelCase ( self :Any ) -> List[str]: '''simple docstring''' return self.variance.sqrt() class A__ ( nn.Module ): def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Callable[..., Tuple[torch.Tensor]] , **SCREAMING_SNAKE_CASE :Dict ) -> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) _a : Tuple =args_dim _a : Tuple =nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] ) _a : Dict =domain_map def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Tuple[torch.Tensor]: '''simple docstring''' _a : Tuple =[proj(SCREAMING_SNAKE_CASE ) for proj in self.proj] return self.domain_map(*SCREAMING_SNAKE_CASE ) class A__ ( nn.Module ): def __init__( self :Dict , SCREAMING_SNAKE_CASE :Tuple ) -> int: '''simple docstring''' super().__init__() _a : List[Any] =function def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[int] , *SCREAMING_SNAKE_CASE :int ) -> List[Any]: '''simple docstring''' return self.function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) class A__ : __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__( self :Any , SCREAMING_SNAKE_CASE :int = 1 ) -> None: '''simple docstring''' _a : Any =dim _a : List[Any] ={k: dim * self.args_dim[k] for k in self.args_dim} def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]: '''simple docstring''' if self.dim == 1: return self.distribution_class(*SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , ) -> Distribution: '''simple docstring''' _a : str =self._base_distribution(SCREAMING_SNAKE_CASE ) if loc is None and scale is None: return distr else: return AffineTransformed(SCREAMING_SNAKE_CASE , loc=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , event_dim=self.event_dim ) @property def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' return () if self.dim == 1 else (self.dim,) @property def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return len(self.event_shape ) @property def __UpperCAmelCase ( self :Any ) -> float: '''simple docstring''' return 0.0 def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> 
nn.Module: '''simple docstring''' return ParameterProjection( in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def __UpperCAmelCase ( self :int , *SCREAMING_SNAKE_CASE :torch.Tensor ) -> Any: '''simple docstring''' raise NotImplementedError() @staticmethod def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor: '''simple docstring''' return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0 class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def __UpperCAmelCase ( cls :int , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Union[str, Any]: '''simple docstring''' _a : Tuple =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) _a : Optional[Any] =2.0 + cls.squareplus(SCREAMING_SNAKE_CASE ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Dict: '''simple docstring''' _a : List[str] =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Optional[int]: '''simple docstring''' _a : int =cls.squareplus(SCREAMING_SNAKE_CASE ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Distribution: '''simple docstring''' _a , _a : Any =distr_args if self.dim == 1: return self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None ) -> Distribution: '''simple docstring''' _a , _a : Optional[int] =distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
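# A minimal sketch of how these distribution-output heads are typically wired
# together. The names `StudentTOutput`, `get_parameter_projection` and
# `distribution` are reconstructed guesses for the obfuscated definitions above:
#
#     output = StudentTOutput(dim=1)
#     projection = output.get_parameter_projection(in_features=32)
#     df, loc, scale = projection(torch.randn(8, 32))  # each of shape (8,)
#     distr = output.distribution((df, loc, scale))    # a torch.distributions.StudentT
#     sample = distr.sample()                          # shape (8,)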
'''simple docstring'''


def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci term with at least n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
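# Hedged sanity checks: F(12) = 144 is the first Fibonacci term with three
# digits, so fibonacci_digits_index(3) == 12; for the Project Euler input of
# 1000 digits, solution(1000) returns 4782.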
'''simple docstring''' import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class A__ : def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :str = "cpu" , SCREAMING_SNAKE_CASE :str = "openai/clip-vit-large-patch14" ) -> None: '''simple docstring''' _a : Any =device _a : Any =CLIPTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE ) _a : int =[0.48_145_466, 0.4_578_275, 0.40_821_073] _a : Tuple =[0.26_862_954, 0.26_130_258, 0.27_577_711] _a : Optional[int] =torchvision.transforms.Normalize(self.image_mean , self.image_std ) _a : Dict =torchvision.transforms.Resize(2_2_4 ) _a : Optional[Any] =torchvision.transforms.CenterCrop(2_2_4 ) def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :Optional[int] ) -> List[str]: '''simple docstring''' _a : Dict =self.resize(SCREAMING_SNAKE_CASE ) _a : Dict =self.center_crop(SCREAMING_SNAKE_CASE ) _a : Optional[Any] =self.normalize(SCREAMING_SNAKE_CASE ) return images def __call__( self :Dict , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :Optional[Any]=None , **SCREAMING_SNAKE_CASE :Optional[int] ) -> List[str]: '''simple docstring''' _a : Union[str, Any] =self.tokenizer(text=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) _a : List[str] =self.preprocess_img(SCREAMING_SNAKE_CASE ) _a : List[Any] ={key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class A__ ( nn.Module ): def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int=1_0 , SCREAMING_SNAKE_CASE :str=0.01 , SCREAMING_SNAKE_CASE :Union[str, Any]=None , SCREAMING_SNAKE_CASE :Optional[int]=None , SCREAMING_SNAKE_CASE :Optional[int]=None , SCREAMING_SNAKE_CASE :Any=None , SCREAMING_SNAKE_CASE :Optional[int]=None , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :int="image" , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Optional[Any]=False , SCREAMING_SNAKE_CASE :int=False , SCREAMING_SNAKE_CASE :List[Any]=False , ) -> None: '''simple docstring''' super().__init__() _a : Any =None _a : List[str] =device if device else get_device() if vqgan: _a : List[str] =vqgan else: _a : Dict =load_vqgan(self.device , conf_path=SCREAMING_SNAKE_CASE , ckpt_path=SCREAMING_SNAKE_CASE ) self.vqgan.eval() if clip: _a : int =clip else: _a : List[Any] =CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) _a : List[Any] =ProcessorGradientFlow(device=self.device ) _a : Optional[int] =iterations _a : str =lr _a : int =log _a : List[Any] =make_grid _a : int =return_val _a : List[Any] =quantize _a : Any =self.vqgan.decoder.z_shape def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :Optional[Any]=None , SCREAMING_SNAKE_CASE :str=None , SCREAMING_SNAKE_CASE :Optional[Any]=5 , SCREAMING_SNAKE_CASE :Union[str, Any]=True ) -> List[Any]: '''simple docstring''' _a : Dict =[] if output_path is None: _a : Tuple ="""./animation.gif""" if input_path is None: _a : int =self.save_path _a : Tuple =sorted(glob(input_path + """/*""" ) ) if not len(SCREAMING_SNAKE_CASE ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(SCREAMING_SNAKE_CASE 
) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) _a : Optional[int] =total_duration / len(SCREAMING_SNAKE_CASE ) _a : Optional[Any] =[frame_duration] * len(SCREAMING_SNAKE_CASE ) if extend_frames: _a : Optional[Any] =1.5 _a : Any =3 for file_name in paths: if file_name.endswith(""".png""" ): images.append(imageio.imread(SCREAMING_SNAKE_CASE ) ) imageio.mimsave(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , duration=SCREAMING_SNAKE_CASE ) print(f"gif saved to {output_path}" ) def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any=None , SCREAMING_SNAKE_CASE :List[Any]=None ) -> List[str]: '''simple docstring''' if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError _a : Dict =preprocess(Image.open(SCREAMING_SNAKE_CASE ) , target_image_size=2_5_6 ).to(self.device ) _a : Tuple =preprocess_vqgan(SCREAMING_SNAKE_CASE ) _a , *_a : List[str] =self.vqgan.encode(SCREAMING_SNAKE_CASE ) return z def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :Any ) -> Dict: '''simple docstring''' _a : str =self.latent.detach().requires_grad_() _a : Tuple =base_latent + transform_vector if self.quantize: _a , *_a : List[Any] =self.vqgan.quantize(SCREAMING_SNAKE_CASE ) else: _a : Dict =trans_latent return self.vqgan.decode(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int]=None ) -> Any: '''simple docstring''' _a : Optional[Any] =self.clip_preprocessor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =self.clip(**SCREAMING_SNAKE_CASE ) _a : List[str] =clip_outputs.logits_per_image if weights is not None: _a : Tuple =similarity_logits * weights return similarity_logits.sum() def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[int] ) -> List[Any]: '''simple docstring''' _a : Optional[int] =self._get_clip_similarity(pos_prompts["""prompts"""] , SCREAMING_SNAKE_CASE , weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: _a : Any =self._get_clip_similarity(neg_prompts["""prompts"""] , SCREAMING_SNAKE_CASE , weights=neg_prompts["""weights"""] ) else: _a : Optional[int] =torch.tensor([1] , device=self.device ) _a : int =-torch.log(SCREAMING_SNAKE_CASE ) + torch.log(SCREAMING_SNAKE_CASE ) return loss def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Dict ) -> Optional[int]: '''simple docstring''' _a : Dict =torch.randn_like(self.latent , requires_grad=SCREAMING_SNAKE_CASE , device=self.device ) _a : List[Any] =torch.optim.Adam([vector] , lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() _a : Tuple =self._add_vector(SCREAMING_SNAKE_CASE ) _a : Dict =loop_post_process(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =self._get_CLIP_loss(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) print("""CLIP loss""" , SCREAMING_SNAKE_CASE ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=SCREAMING_SNAKE_CASE ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :int ) -> 
List[Any]: '''simple docstring''' wandb.init(reinit=SCREAMING_SNAKE_CASE , project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: _a : List[Any] =Image.open(SCREAMING_SNAKE_CASE ) _a : str =image.resize((2_5_6, 2_5_6) ) wandb.log("""Original Image""" , wandb.Image(SCREAMING_SNAKE_CASE ) ) def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Union[str, Any]: '''simple docstring''' if not prompts: return [] _a : List[str] =[] _a : str =[] if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _a : Dict =[prompt.strip() for prompt in prompts.split("""|""" )] for prompt in prompts: if isinstance(SCREAMING_SNAKE_CASE , (tuple, list) ): _a : Tuple =prompt[0] _a : Tuple =float(prompt[1] ) elif ":" in prompt: _a , _a : Any =prompt.split(""":""" ) _a : Union[str, Any] =float(SCREAMING_SNAKE_CASE ) else: _a : int =prompt _a : List[Any] =1.0 processed_prompts.append(SCREAMING_SNAKE_CASE ) weights.append(SCREAMING_SNAKE_CASE ) return { "prompts": processed_prompts, "weights": torch.tensor(SCREAMING_SNAKE_CASE , device=self.device ), } def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[Any]=None , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Optional[Any]=False , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Union[str, Any]=None , ) -> Dict: '''simple docstring''' if image_path: _a : Tuple =self._get_latent(SCREAMING_SNAKE_CASE ) else: _a : Dict =torch.randn(self.latent_dim , device=self.device ) if self.log: self._init_logging(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert pos_prompts, "You must provide at least one positive prompt." _a : List[str] =self.process_prompts(SCREAMING_SNAKE_CASE ) _a : Any =self.process_prompts(SCREAMING_SNAKE_CASE ) if save_final and save_path is None: _a : Any =os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(SCREAMING_SNAKE_CASE ): os.makedirs(SCREAMING_SNAKE_CASE ) else: _a : Optional[int] =save_path + """_""" + get_timestamp() os.makedirs(SCREAMING_SNAKE_CASE ) _a : Any =save_path _a : List[str] =self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(SCREAMING_SNAKE_CASE ) ) _a : Optional[int] =loop_post_process(SCREAMING_SNAKE_CASE ) for iter, transformed_img in enumerate(self._optimize_CLIP(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ): if show_intermediate: show_pil(SCREAMING_SNAKE_CASE ) if save_intermediate: transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}.png" ) ) if self.log: wandb.log({"""Image""": wandb.Image(SCREAMING_SNAKE_CASE )} ) if show_final: show_pil(SCREAMING_SNAKE_CASE ) if save_final: transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}_final.png" ) )
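# A heavily hedged usage sketch. The class and argument names below are
# reconstructed guesses for the obfuscated editor class above (assumed to be
# exposed as `VQGAN_CLIP`), and the file path is a placeholder:
#
#     editor = VQGAN_CLIP()
#     editor.generate(pos_prompts="a smiling face", image_path="face.png")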
'''simple docstring'''

import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
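# Example invocation (the script name is inferred from the function it defines,
# and all paths are placeholders):
#
#     python convert_rembert_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/rembert/checkpoint \
#         --rembert_config_file /path/to/rembert/config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin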
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING A__: str = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase__ ) class A__ ( UpperCAmelCase__ ): def __init__( self :Optional[int] , *SCREAMING_SNAKE_CASE :Dict , **SCREAMING_SNAKE_CASE :Optional[Any] ) -> List[str]: '''simple docstring''' super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :List[Any]=None ) -> Union[str, Any]: '''simple docstring''' _a : Tuple ={} _a : Union[str, Any] ={} if prompt is not None: _a : int =prompt if generate_kwargs is not None: _a : Union[str, Any] =generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _a : List[str] ={} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) _a : Tuple =max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, List[str], "Image.Image", List["Image.Image"]] , **SCREAMING_SNAKE_CASE :List[str] ) -> List[Any]: '''simple docstring''' return super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[Any]=None ) -> Optional[Any]: '''simple docstring''' _a : Any =load_image(SCREAMING_SNAKE_CASE ) if prompt is not None: if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): raise ValueError( f"Received an invalid text input, got - {type(SCREAMING_SNAKE_CASE )} - but expected a single string. 
" """Note also that one single text can be provided for conditional image to text generation.""" ) _a : Optional[Any] =self.model.config.model_type if model_type == "git": _a : Optional[Any] =self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _a : Optional[int] =self.tokenizer(text=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ).input_ids _a : str =[self.tokenizer.cls_token_id] + input_ids _a : str =torch.tensor(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": _a : int =self.image_processor(images=SCREAMING_SNAKE_CASE , header_text=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _a : Union[str, Any] =self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _a : List[Any] =self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(SCREAMING_SNAKE_CASE ) else: raise ValueError(f"Model type {model_type} does not support conditional text generation" ) else: _a : Tuple =self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _a : int =None return model_inputs def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Tuple=None ) -> List[str]: '''simple docstring''' # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , SCREAMING_SNAKE_CASE ) and all(x is None for x in model_inputs["""input_ids"""] ) ): _a : Any =None if generate_kwargs is None: _a : Optional[int] ={} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _a : Any =model_inputs.pop(self.model.main_input_name ) _a : List[Any] =self.model.generate(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) return model_outputs def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]: '''simple docstring''' _a : Union[str, Any] =[] for output_ids in model_outputs: _a : List[str] ={ """generated_text""": self.tokenizer.decode( SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE , ) } records.append(SCREAMING_SNAKE_CASE ) return records
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A__: Optional[int] = logging.get_logger(__name__) A__: Union[str, Any] = '''▁''' A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''} A__: Optional[int] = { '''vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''', }, '''monolingual_vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''', }, } A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024} class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = VOCAB_FILES_NAMES __UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it _a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token _a : int ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) _a : Dict =vocab_file _a : int =monolingual_vocab_file _a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _a : List[Any] ={} _a : List[str] =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[Any] =cnt cnt += 1 with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _a : int =line.strip().split()[0] _a : str =len(self.fairseq_tokens_to_ids ) if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[int] =len(self.fairseq_tokens_to_ids ) _a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self :int ) -> List[Any]: '''simple docstring''' _a : Optional[int] =self.__dict__.copy() _a : Optional[Any] =None _a : str =self.sp_model.serialized_model_proto() return state def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str: '''simple docstring''' _a : List[str] =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple ={} _a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def 
__UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _a : Optional[int] =[self.cls_token_id] _a : int =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _a : List[str] =[self.sep_token_id] _a : int =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __UpperCAmelCase ( self :Optional[int] ) -> int: '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def __UpperCAmelCase ( self :int ) -> Union[str, Any]: '''simple docstring''' _a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict: '''simple docstring''' return self.fairseq_ids_to_tokens[index] def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip() return out_string def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _a : int =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _a : Any =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi: _a : Optional[Any] =self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE ) 
elif not os.path.isfile(self.monolingual_vocab_file ): with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" ) return out_vocab_file, out_monolingual_vocab_file
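# A minimal usage sketch, assuming this class backs the public BartPho
# checkpoint; the Vietnamese sentence is illustrative:
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
    print(tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"])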
'''simple docstring'''


def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than four characters, leaving the rest untouched."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
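# Hedged walk-through: in "Hey wollef sroirraw" only the words longer than four
# characters are reversed, so the demo above prints "Hey fellow warriors".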
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
'''simple docstring'''

from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given exactly one of voltage, current or power as 0, compute it from P = V * I."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
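# Hedged examples of the three supported queries (one argument passed as 0):
#     electric_power(voltage=0, current=2, power=5)    -> result(name='voltage', value=2.5)
#     electric_power(voltage=2, current=0, power=4)    -> result(name='current', value=2.0)
#     electric_power(voltage=2, current=2.5, power=0)  -> result(name='power', value=5.0)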
'''simple docstring'''


def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in the form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(current_first_column)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
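# Hedged check of the demo above: writing S = x1 + ... + x5, each equation
# reads x_i + S = 3 + i, so S = 5 and the two calls print
#     [-1.0, 0.0, 1.0, 2.0, 3.0]
#     [0.5]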
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A__: int = logging.get_logger(__name__) A__: Union[str, Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''} A__: int = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, } A__: Optional[int] = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } A__: str = '''▁''' class A__ ( UpperCAmelCase__ ): __UpperCamelCase : List[str] = VOCAB_FILES_NAMES __UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :List[Any]="<s>" , SCREAMING_SNAKE_CASE :str="</s>" , SCREAMING_SNAKE_CASE :Dict="</s>" , SCREAMING_SNAKE_CASE :Dict="<s>" , SCREAMING_SNAKE_CASE :Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE :Any="<pad>" , SCREAMING_SNAKE_CASE :List[Any]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[str] , ) -> None: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it _a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token _a : Dict ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) _a : Dict =vocab_file _a : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) ) _a : List[Any] ={"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} _a : str =len(self.sp_model ) - 1 _a : List[str] ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _a : Any =[self.cls_token_id] _a : Optional[int] =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * 
len(SCREAMING_SNAKE_CASE )) + [1] def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _a : Tuple =[self.sep_token_id] _a : Union[str, Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __UpperCAmelCase ( self :List[Any] ) -> List[Any]: '''simple docstring''' return len(self.sp_model ) def __UpperCAmelCase ( self :List[Any] ) -> Tuple: '''simple docstring''' _a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> Optional[Any]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _a : List[str] =self.sp_model.PieceToId(SCREAMING_SNAKE_CASE ) return spm_id if spm_id else self.unk_token_id def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[Any] ) -> List[Any]: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :Tuple ) -> Union[str, Any]: '''simple docstring''' _a : Optional[int] =[] _a : Union[str, Any] ="""""" _a : Optional[Any] =False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE ) + token _a : Tuple =True _a : Optional[int] =[] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE ) _a : List[str] =False out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE ) return out_string.strip() def __getstate__( self :Dict ) -> Optional[Any]: '''simple docstring''' _a : List[str] =self.__dict__.copy() _a : Union[str, Any] =None return state def __setstate__( self :List[str] , SCREAMING_SNAKE_CASE :List[Any] ) -> Dict: '''simple docstring''' _a : Dict =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple ={} _a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _a : Any =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi: _a : Tuple =self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
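# A minimal usage sketch, assuming this class backs the public BARThez
# checkpoints; the French sentence is illustrative:
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("moussaKam/barthez")
    print(tokenizer("Paris est la capitale de la France.")["input_ids"])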
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
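# A minimal usage sketch for the configuration above (the standard transformers
# pattern; instantiating the model from a bare config gives random weights):
if __name__ == "__main__":
    from transformers import MarkupLMConfig, MarkupLMModel

    config = MarkupLMConfig()      # defaults correspond to the base-sized architecture
    model = MarkupLMModel(config)  # randomly initialized, no pretrained weights loaded
    print(config.xpath_unit_hidden_size)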
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor A__: Union[str, Any] = logging.getLogger(__name__) A__: int = 50 # max width of layer names A__: int = 70 # max width of quantizer names def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> str: _a : List[str] =parser.add_argument_group("""quant_trainer arguments""" ) group.add_argument("""--wprec""" ,type=_UpperCAmelCase ,default=8 ,help="""weight precision""" ) group.add_argument("""--aprec""" ,type=_UpperCAmelCase ,default=8 ,help="""activation precision""" ) group.add_argument("""--quant-per-tensor""" ,action="""store_true""" ,help="""per tensor weight scaling""" ) group.add_argument("""--quant-disable""" ,action="""store_true""" ,help="""disable all quantizers""" ) group.add_argument("""--quant-disable-embeddings""" ,action="""store_true""" ,help="""disable all embeddings quantizers""" ) group.add_argument("""--quant-disable-keyword""" ,type=_UpperCAmelCase ,nargs="""+""" ,help="""disable quantizers by keyword""" ) group.add_argument("""--quant-disable-layer-module""" ,type=_UpperCAmelCase ,help="""disable quantizers by keyword under layer.""" ) group.add_argument("""--quant-enable-layer-module""" ,type=_UpperCAmelCase ,help="""enable quantizers by keyword under layer""" ) group.add_argument("""--calibrator""" ,default="""max""" ,help="""which quantization range calibrator to use""" ) group.add_argument("""--percentile""" ,default=_UpperCAmelCase ,type=_UpperCAmelCase ,help="""percentile for PercentileCalibrator""" ) group.add_argument("""--fuse-qkv""" ,action="""store_true""" ,help="""use the same scale factor for qkv""" ) group.add_argument("""--clip-gelu""" ,metavar="""N""" ,type=_UpperCAmelCase ,help="""clip gelu output maximum value to N""" ) group.add_argument( """--recalibrate-weights""" ,action="""store_true""" ,help=( """recalibrate weight amaxes by taking the max of the weights.""" """ amaxes will be computed with the current quantization granularity (axis).""" ) ,) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> List[str]: if args.calibrator == "max": _a : int ="""max""" elif args.calibrator == "percentile": if args.percentile is None: raise ValueError("""Specify --percentile when using percentile calibrator""" ) _a : Any ="""histogram""" elif args.calibrator == "mse": _a : Union[str, Any] ="""histogram""" else: raise ValueError(F"Invalid calibrator {args.calibrator}" ) _a : str =QuantDescriptor(num_bits=args.aprec ,calib_method=_UpperCAmelCase ) _a : int =QuantDescriptor(num_bits=args.wprec ,axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_UpperCAmelCase ) quant_nn.QuantLinear.set_default_quant_desc_weight(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Union[str, Any]=False ,_UpperCAmelCase : Optional[Any]=False ) -> int: logger.info("""Configuring Model for Quantization""" ) logger.info(F"using quantization package {pytorch_quantization.__file__}" ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_UpperCAmelCase ,["""embeddings"""] ,which="""weight""" ,_disabled=_UpperCAmelCase ) if args.quant_disable: set_quantizer_by_name(_UpperCAmelCase ,[""""""] ,_disabled=_UpperCAmelCase ) if args.quant_disable_keyword: set_quantizer_by_name(_UpperCAmelCase ,args.quant_disable_keyword 
,_disabled=_UpperCAmelCase ) if args.quant_disable_layer_module: set_quantizer_by_name(_UpperCAmelCase ,[R"""layer.\d+.""" + args.quant_disable_layer_module] ,_disabled=_UpperCAmelCase ) if args.quant_enable_layer_module: set_quantizer_by_name(_UpperCAmelCase ,[R"""layer.\d+.""" + args.quant_enable_layer_module] ,_disabled=_UpperCAmelCase ) if args.recalibrate_weights: recalibrate_weights(_UpperCAmelCase ) if args.fuse_qkv: fuse_qkv(_UpperCAmelCase ,_UpperCAmelCase ) if args.clip_gelu: clip_gelu(_UpperCAmelCase ,args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> Optional[Any]: logger.info("""Enabling Calibration""" ) for name, module in model.named_modules(): if name.endswith("""_quantizer""" ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F"{name:80}: {module}" ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ) -> Union[str, Any]: logger.info("""Loading calibrated amax""" ) for name, module in model.named_modules(): if name.endswith("""_quantizer""" ): if module._calibrator is not None: if isinstance(module._calibrator ,calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax("""percentile""" ,percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : int ) -> Optional[Any]: def fusea(_UpperCAmelCase : Any ,_UpperCAmelCase : str ,_UpperCAmelCase : str ): for mod in [qq, qk, qv]: if not hasattr(_UpperCAmelCase ,"""_amax""" ): print(""" WARNING: NO AMAX BUFFER""" ) return _a : Optional[int] =qq._amax.detach().item() _a : Optional[Any] =qk._amax.detach().item() _a : List[str] =qv._amax.detach().item() _a : Dict =max(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) qq._amax.fill_(_UpperCAmelCase ) qk._amax.fill_(_UpperCAmelCase ) qv._amax.fill_(_UpperCAmelCase ) logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" ) for name, mod in model.named_modules(): if name.endswith(""".attention.self""" ): logger.info(F"FUSE_QKV: {name:{name_width}}" ) fusea(mod.matmul_q_input_quantizer ,mod.matmul_k_input_quantizer ,mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer ,mod.key._weight_quantizer ,mod.value._weight_quantizer ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ) -> Tuple: for name, mod in model.named_modules(): if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ): _a : Any =mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_UpperCAmelCase ) _a : Tuple =mod._input_quantizer._amax.data.detach().item() logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ) -> Optional[int]: for name, mod in model.named_modules(): if hasattr(_UpperCAmelCase ,"""_weight_quantizer""" ) and mod._weight_quantizer.axis is not None: _a : Tuple =mod.weight.shape[0] _a : List[Any] =mod._weight_quantizer._amax.detach() _a : Optional[int] =torch.ones(_UpperCAmelCase ,dtype=amax.dtype ,device=amax.device ) * amax print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int: for name, mod in 
model.named_modules(): if hasattr(_UpperCAmelCase ,"""_weight_quantizer""" ): if not hasattr(mod._weight_quantizer ,"""_amax""" ): print(F"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) _a : Tuple =set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _a : Union[str, Any] =set(range(len(mod.weight.size() ) ) ) - axis_set _a : List[str] =pytorch_quantization.utils.reduce_amax(mod.weight ,axis=_UpperCAmelCase ,keepdims=_UpperCAmelCase ).detach() logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" ) _a : str =amax def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int]=25 ,_UpperCAmelCase : List[Any]=180 ,_UpperCAmelCase : Optional[Any]=None ) -> Dict: if ignore is None: _a : Any =[] elif not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): _a : Union[str, Any] =[ignore] _a : int =0 for name, mod in model.named_modules(): if not hasattr(_UpperCAmelCase ,"""weight""" ): continue _a : Any =max(_UpperCAmelCase ,len(_UpperCAmelCase ) ) for name, mod in model.named_modules(): _a : Tuple =getattr(_UpperCAmelCase ,"""_input_quantizer""" ,_UpperCAmelCase ) _a : Optional[int] =getattr(_UpperCAmelCase ,"""_weight_quantizer""" ,_UpperCAmelCase ) if not hasattr(_UpperCAmelCase ,"""weight""" ): continue if type(_UpperCAmelCase ) in ignore: continue if [True for s in ignore if type(_UpperCAmelCase ) is str and s in name]: continue _a : Dict =F"Act:{input_q.extra_repr()}" _a : Tuple =F"Wgt:{weight_q.extra_repr()}" _a : Dict =F"{name:{name_width}} {act_str} {wgt_str}" if len(_UpperCAmelCase ) <= line_width: logger.info(_UpperCAmelCase ) else: logger.info(F"{name:{name_width}} {act_str}" ) logger.info(F"{' ':{name_width}} {wgt_str}" ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: _a : Optional[Any] =0 for name, mod in model.named_modules(): if isinstance(_UpperCAmelCase ,pytorch_quantization.nn.TensorQuantizer ): print(F"{name:80} {mod}" ) count += 1 print(F"{count} TensorQuantizers found in model" ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : List[Any] ) -> Tuple: _a : Union[str, Any] =getattr(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) if quantizer_mod is not None: assert hasattr(_UpperCAmelCase ,_UpperCAmelCase ) setattr(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) else: logger.warning(F"{name} has no {quantizer}" ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : Dict ,_UpperCAmelCase : int="both" ,**_UpperCAmelCase : Tuple ) -> Dict: _a : Optional[Any] =F"Warning: changing {which} quantizers of {name:{qname_width}}" for k, v in kwargs.items(): s += F" {k}={v}" if which in ["input", "both"]: set_quantizer(_UpperCAmelCase ,_UpperCAmelCase ,"""_input_quantizer""" ,_UpperCAmelCase ,_UpperCAmelCase ) if which in ["weight", "both"]: set_quantizer(_UpperCAmelCase ,_UpperCAmelCase ,"""_weight_quantizer""" ,_UpperCAmelCase ,_UpperCAmelCase ) logger.info(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Any ,**_UpperCAmelCase : Any ) -> Dict: for name, mod in model.named_modules(): if hasattr(_UpperCAmelCase ,"""_input_quantizer""" ) or hasattr(_UpperCAmelCase ,"""_weight_quantizer""" ): for n in names: if re.search(_UpperCAmelCase
,_UpperCAmelCase ): set_quantizers(_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase ) elif name.endswith("""_quantizer""" ): for n in names: if re.search(_UpperCAmelCase ,_UpperCAmelCase ): _a : Optional[Any] =F"Warning: changing {name:{name_width}}" for k, v in kwargs.items(): s += F" {k}={v}" setattr(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) logger.info(_UpperCAmelCase )
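A minimal usage sketch for the quantization helpers above, added for illustration: the dump mangles the helper names, so the pytorch_quantization calls they wrap are repeated directly, and the layer size is arbitrary.

from pytorch_quantization import nn as quant_nn
from pytorch_quantization.tensor_quant import QuantDescriptor

# Per-tensor activation ranges calibrated with the running max, as the
# "max" calibrator branch above configures.
input_desc = QuantDescriptor(num_bits=8, calib_method="max")
# Per-output-channel (axis 0) weight ranges, the default when
# --quant-per-tensor is not passed.
weight_desc = QuantDescriptor(num_bits=8, axis=(0,))
quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
layer = quant_nn.QuantLinear(16, 16)  # layers created afterwards pick up both defaults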
694
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''') def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict: hf_model.apply_weight_norm() _a : Any =checkpoint["""input_conv.weight_g"""] _a : Union[str, Any] =checkpoint["""input_conv.weight_v"""] _a : Optional[int] =checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): _a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"] _a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"] _a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): _a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"] _a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"] _a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"] _a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"] _a : Dict =checkpoint["""output_conv.1.weight_g"""] _a : str =checkpoint["""output_conv.1.weight_v"""] _a : Union[str, Any] =checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]: if config_path is not None: _a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase ) else: _a : str =SpeechTaHifiGanConfig() _a : Tuple =SpeechTaHifiGan(_UpperCAmelCase ) _a : int =torch.load(_UpperCAmelCase ) load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase ) _a : Dict =np.load(_UpperCAmelCase ) _a : Union[str, Any] =stats[0].reshape(-1 ) _a : Any =stats[1].reshape(-1 ) _a : Tuple =torch.from_numpy(_UpperCAmelCase ).float() _a : List[str] =torch.from_numpy(_UpperCAmelCase ).float() model.save_pretrained(_UpperCAmelCase ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(_UpperCAmelCase ) if __name__ == "__main__": A__: Optional[int] = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) A__: Tuple = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
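A hypothetical command line for the conversion script above (the script file name and all paths are placeholders, not taken from the source):

# python convert_hifigan_checkpoint.py \
#     --checkpoint_path generator.ckpt.pt \
#     --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan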
694
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class A__ ( unittest.TestCase ): def __UpperCAmelCase ( self :Dict ) -> Tuple: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __UpperCAmelCase ( self :List[Any] ) -> Dict: '''simple docstring''' _a : int =1 _a : Dict =3 _a : List[Any] =(3_2, 3_2) _a : Dict =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) return image @property def __UpperCAmelCase ( self :Any ) -> List[Any]: '''simple docstring''' torch.manual_seed(0 ) _a : int =UNetaDConditionModel( block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=SCREAMING_SNAKE_CASE , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , ) return model @property def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) _a : str =AutoencoderKL( block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def __UpperCAmelCase ( self :str ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) _a : Dict =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , ) return CLIPTextModel(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Tuple ) -> List[str]: '''simple docstring''' _a : List[str] ="""cpu""" # ensure determinism for the device-dependent torch.Generator _a : Any =self.dummy_cond_unet_upscale _a : Optional[Any] =DDPMScheduler() _a : Optional[Any] =DDIMScheduler(prediction_type="""v_prediction""" ) _a : Any =self.dummy_vae _a : Any =self.dummy_text_encoder _a : str =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _a : Any =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _a : int =Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((6_4, 6_4) ) # make sure here that pndm scheduler skips prk _a : Tuple =StableDiffusionUpscalePipeline( unet=SCREAMING_SNAKE_CASE , low_res_scheduler=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , max_noise_level=3_5_0 , ) _a : str =sd_pipe.to(SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) _a : Optional[int] ="""A painting of a squirrel eating a burger""" _a : Optional[int] 
=torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(0 ) _a : Any =sd_pipe( [prompt] , image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , ) _a : Optional[int] =output.images _a : Dict =torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(0 ) _a : List[Any] =sd_pipe( [prompt] , image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , return_dict=SCREAMING_SNAKE_CASE , )[0] _a : str =image[0, -3:, -3:, -1] _a : List[str] =image_from_tuple[0, -3:, -3:, -1] _a : Optional[Any] =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _a : int =np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' _a : Optional[Any] ="""cpu""" # ensure determinism for the device-dependent torch.Generator _a : Any =self.dummy_cond_unet_upscale _a : List[str] =DDPMScheduler() _a : Optional[int] =DDIMScheduler(prediction_type="""v_prediction""" ) _a : str =self.dummy_vae _a : List[str] =self.dummy_text_encoder _a : Dict =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _a : Optional[int] =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _a : Tuple =Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((6_4, 6_4) ) # make sure here that pndm scheduler skips prk _a : str =StableDiffusionUpscalePipeline( unet=SCREAMING_SNAKE_CASE , low_res_scheduler=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , max_noise_level=3_5_0 , ) _a : str =sd_pipe.to(SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) _a : Tuple ="""A painting of a squirrel eating a burger""" _a : List[Any] =sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , ) _a : Any =output.images assert image.shape[0] == 2 _a : Optional[Any] =torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(0 ) _a : Dict =sd_pipe( [prompt] , image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , ) _a : Tuple =output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def __UpperCAmelCase ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' _a : int =self.dummy_cond_unet_upscale _a : Dict =DDPMScheduler() _a : Any =DDIMScheduler(prediction_type="""v_prediction""" ) _a : str =self.dummy_vae _a : Dict =self.dummy_text_encoder _a : List[Any] =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _a : List[str] =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _a : Optional[int] =Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((6_4, 6_4) ) # put models in fp16, except vae as it overflows in fp16 _a : Any =unet.half() _a : Tuple =text_encoder.half() # make sure here that pndm scheduler skips prk _a : str =StableDiffusionUpscalePipeline( unet=SCREAMING_SNAKE_CASE , 
low_res_scheduler=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , max_noise_level=3_5_0 , ) _a : Optional[int] =sd_pipe.to(SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) _a : Any ="""A painting of a squirrel eating a burger""" _a : Optional[Any] =torch.manual_seed(0 ) _a : int =sd_pipe( [prompt] , image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""np""" , ).images _a : Optional[Any] =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCAmelCase ( self :int ) -> int: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self :List[str] ) -> Dict: '''simple docstring''' _a : Dict =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _a : List[str] =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat.npy""" ) _a : str ="""stabilityai/stable-diffusion-x4-upscaler""" _a : Union[str, Any] =StableDiffusionUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE ) pipe.to(SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) pipe.enable_attention_slicing() _a : str ="""a cat sitting on a park bench""" _a : Optional[Any] =torch.manual_seed(0 ) _a : Union[str, Any] =pipe( prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , output_type="""np""" , ) _a : str =output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 1e-3 def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' _a : Optional[int] =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _a : List[Any] =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat_fp16.npy""" ) _a : str ="""stabilityai/stable-diffusion-x4-upscaler""" _a : int =StableDiffusionUpscalePipeline.from_pretrained( SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , ) pipe.to(SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) pipe.enable_attention_slicing() _a : List[str] ="""a cat sitting on a park bench""" _a : Tuple =torch.manual_seed(0 ) _a : List[Any] =pipe( prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , output_type="""np""" , ) _a : Optional[int] =output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5e-1 def __UpperCAmelCase ( self :int ) -> Any: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _a : List[str] =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _a : str ="""stabilityai/stable-diffusion-x4-upscaler""" _a : Union[str, Any] =StableDiffusionUpscalePipeline.from_pretrained( SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , ) pipe.to(SCREAMING_SNAKE_CASE ) 
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _a : Any ="""a cat sitting on a park bench""" _a : Union[str, Any] =torch.manual_seed(0 ) _a : List[Any] =pipe( prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=5 , output_type="""np""" , ) _a : Any =torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 1_0**9
694
'''simple docstring''' class A__ : def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]: '''simple docstring''' _a : List[str] =None _a : Optional[Any] =None _a : str =graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Optional[int] =len(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =None def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any: '''simple docstring''' if isinstance(sources , int ): _a : Tuple =[sources] if isinstance(sinks , int ): _a : Optional[int] =[sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return _a : Union[str, Any] =sources[0] _a : Tuple =sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: _a : Tuple =0 for i in sources: max_input_flow += sum(self.graph[i] ) _a : List[Any] =len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _a : Any =max_input_flow _a : List[str] =0 _a : List[str] =len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _a : str =max_input_flow _a : Optional[Any] =size - 1 def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int: '''simple docstring''' _a : Tuple =algorithm(self ) class A__ : def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict: '''simple docstring''' _a : List[str] =flow_network _a : List[Any] =flow_network.verticesCount _a : str =flow_network.sourceIndex _a : str =flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _a : List[Any] =flow_network.graph _a : Optional[int] =False def __UpperCAmelCase ( self :List[Any] ) -> List[str]: '''simple docstring''' if not self.executed: self._algorithm() _a : Any =True def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]: '''simple docstring''' pass class A__ ( UpperCAmelCase__ ): def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result _a : List[Any] =-1 def __UpperCAmelCase ( self :Dict ) -> Tuple: '''simple docstring''' if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class A__ ( UpperCAmelCase__ ): def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) _a : int =[[0] * self.verticies_count for i in range(self.verticies_count )] _a : Union[str, Any] =[0] * self.verticies_count _a : Optional[Any] =[0] * self.verticies_count def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' _a : int =self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] +=
bandwidth # Relabel-to-front selection rule _a : Tuple =[ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _a : List[Any] =0 while i < len(SCREAMING_SNAKE_CASE ): _a : Any =vertices_list[i] _a : str =self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) _a : List[str] =0 else: i += 1 _a : Optional[int] =sum(self.preflow[self.source_index] ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' _a : List[str] =min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]: '''simple docstring''' _a : int =None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _a : Optional[Any] =self.heights[to_index] if min_height is not None: _a : Any =min_height + 1 if __name__ == "__main__": A__: str = [0] A__: Optional[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network A__: Union[str, Any] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate A__: List[str] = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
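A hand check of the demo run above (a sketch; the mangled class names keep us from reusing them directly): from entrance 0 the only augmenting chain is 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8, and the 9-capacity edge 3 -> 0 points back into the source, so it carries nothing outward.

assert min(7, 6, 8) == 6  # expected value of the maximum flow printed above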
694
1
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 4000000 ) -> int: _a : Optional[Any] =[] _a , _a : Union[str, Any] =0, 1 while b <= n: if b % 2 == 0: even_fibs.append(_UpperCAmelCase ) _a , _a : Optional[Any] =b, a + b return sum(_UpperCAmelCase ) if __name__ == "__main__": print(F"{solution() = }")
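A quick sanity check for the Project Euler routine above: the even Fibonacci numbers are every third term (2, 8, 34, 144, ...), and the ones below four million sum to the expected answer.

assert sum([2, 8, 34, 144, 610, 2584, 10946, 46368, 196418, 832040, 3524578]) == 4613732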
694
'''simple docstring''' A__: Optional[int] = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' A__: List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] A__: int = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
694
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A__: Optional[int] = { '''configuration_mobilenet_v2''': [ '''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileNetV2Config''', '''MobileNetV2OnnxConfig''', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: Optional[Any] = ['''MobileNetV2FeatureExtractor'''] A__: Dict = ['''MobileNetV2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: List[str] = [ '''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileNetV2ForImageClassification''', '''MobileNetV2ForSemanticSegmentation''', '''MobileNetV2Model''', '''MobileNetV2PreTrainedModel''', '''load_tf_weights_in_mobilenet_v2''', ] if TYPE_CHECKING: from .configuration_mobilenet_va import ( MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetVaConfig, MobileNetVaOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor from .image_processing_mobilenet_va import MobileNetVaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilenet_va import ( MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel, MobileNetVaPreTrainedModel, load_tf_weights_in_mobilenet_va, ) else: import sys A__: List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
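A brief note on the lazy-import pattern above, with a sketch of the consumer side (assuming the usual transformers top-level re-exports): importing the package only builds the _import_structure dict; the heavy torch/vision modules load on first attribute access.

import transformers  # cheap: no torch/vision import happens yet
config = transformers.MobileNetV2Config()  # first access resolves the lazy module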
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float: return price * (1 + tax_rate) if __name__ == "__main__": print(F"{price_plus_tax(100, 0.25) = }") print(F"{price_plus_tax(125.50, 0.05) = }")
694
1
'''simple docstring''' import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(UpperCAmelCase__ ) , "Tatoeba directory does not exist." ) class A__ ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self :Tuple ) -> int: '''simple docstring''' _a : List[Any] =tempfile.mkdtemp() return TatoebaConverter(save_dir=SCREAMING_SNAKE_CASE ) @slow def __UpperCAmelCase ( self :Optional[Any] ) -> Union[str, Any]: '''simple docstring''' self.resolver.convert_models(["""heb-eng"""] ) @slow def __UpperCAmelCase ( self :Optional[Any] ) -> Dict: '''simple docstring''' _a , _a : List[str] =self.resolver.write_model_card("""opus-mt-he-en""" , dry_run=SCREAMING_SNAKE_CASE ) assert mmeta["long_pair"] == "heb-eng"
694
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class A__ ( unittest.TestCase ): def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple: '''simple docstring''' _a : Optional[Any] =parent _a : List[str] =batch_size _a : List[str] =seq_length _a : List[Any] =is_training _a : Optional[int] =use_attention_mask _a : List[Any] =use_token_type_ids _a : List[Any] =use_labels _a : Optional[Any] =vocab_size _a : str =hidden_size _a : List[Any] =num_hidden_layers _a : List[Any] =num_attention_heads _a : Union[str, Any] =intermediate_size _a : int =hidden_act _a : List[str] =hidden_dropout_prob _a : Optional[int] =attention_probs_dropout_prob _a : Dict =max_position_embeddings _a : Any =type_vocab_size _a : str =type_sequence_label_size _a : str =initializer_range _a : List[str] =num_choices def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict: '''simple docstring''' _a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Dict =None if self.use_attention_mask: _a : Any =random_attention_mask([self.batch_size, self.seq_length] ) _a : Optional[int] =None if self.use_token_type_ids: _a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a : Union[str, Any] =RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' _a : Tuple =self.prepare_config_and_inputs() _a , _a , _a , _a : List[Any] =config_and_inputs _a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def 
__UpperCAmelCase ( self :int ) -> str: '''simple docstring''' _a : List[Any] =self.prepare_config_and_inputs() _a , _a , _a , _a : Optional[int] =config_and_inputs _a : Tuple =True _a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : Union[str, Any] = True __UpperCamelCase : Dict = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self :List[str] ) -> Optional[int]: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self ) @slow def __UpperCAmelCase ( self :str ) -> int: '''simple docstring''' for model_class_name in self.all_model_classes: _a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Dict =model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_flax class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Any ) -> str: '''simple docstring''' _a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Dict =model(SCREAMING_SNAKE_CASE )[0] _a : List[Any] =[1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. _a : Any =np.array( [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def __UpperCAmelCase ( self :int ) -> int: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. _a : str =np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
694
1
'''simple docstring''' from math import factorial def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 20 ) -> int: _a : Optional[int] =2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... _a : Any =n // 2 return int(factorial(_UpperCAmelCase ) / (factorial(_UpperCAmelCase ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: A__: Optional[int] = int(sys.argv[1]) print(solution(n)) except ValueError: print('''Invalid entry - please enter a number.''')
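A sanity check for the function above, which returns the central binomial coefficient C(2n, n); for the default n = 20 this counts the lattice paths through a 20x20 grid.

import math
assert math.comb(40, 20) == 137846528820  # 40! / (20! * 20!)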
694
'''simple docstring''' import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A__: Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : str=None ) -> Union[str, Any]: return field(default_factory=lambda: default ,metadata=_UpperCAmelCase ) @dataclass class A__ : __UpperCamelCase : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __UpperCamelCase : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __UpperCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __UpperCamelCase : str = field( default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , ) __UpperCamelCase : str = field( default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __UpperCamelCase : str = field( default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , ) __UpperCamelCase : str = field( default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , ) __UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' warnings.warn( f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , SCREAMING_SNAKE_CASE , ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[str]: '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
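A hedged sketch of how such a benchmark dataclass is normally consumed: the class name is mangled to A__ in this dump, so the real transformers export BenchmarkArguments stands in for it.

from transformers import BenchmarkArguments, HfArgumentParser

parser = HfArgumentParser(BenchmarkArguments)
# Flags mirror the dataclass fields, e.g. --models / --batch_sizes.
(benchmark_args,) = parser.parse_args_into_dataclasses(["--models", "bert-base-cased"])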
694
1
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> bool: return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
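Worked examples for the sign test above: the XOR of two ints is negative exactly when their sign bits differ.

assert (-1 ^ 1) < 0         # opposite signs
assert not ((1 ^ 2) < 0)    # both positive
assert not ((-1 ^ -2) < 0)  # both negative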
694
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class A__ ( UpperCAmelCase__ ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Distribution , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :List[Any]=0 ) -> List[str]: '''simple docstring''' _a : int =1.0 if scale is None else scale _a : Optional[Any] =0.0 if loc is None else loc super().__init__(SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE )] ) @property def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return self.base_dist.mean * self.scale + self.loc @property def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return self.base_dist.variance * self.scale**2 @property def __UpperCAmelCase ( self :Any ) -> List[str]: '''simple docstring''' return self.variance.sqrt() class A__ ( nn.Module ): def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Callable[..., Tuple[torch.Tensor]] , **SCREAMING_SNAKE_CASE :Dict ) -> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) _a : Tuple =args_dim _a : Tuple =nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] ) _a : Dict =domain_map def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Tuple[torch.Tensor]: '''simple docstring''' _a : Tuple =[proj(SCREAMING_SNAKE_CASE ) for proj in self.proj] return self.domain_map(*SCREAMING_SNAKE_CASE ) class A__ ( nn.Module ): def __init__( self :Dict , SCREAMING_SNAKE_CASE :Tuple ) -> int: '''simple docstring''' super().__init__() _a : List[Any] =function def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[int] , *SCREAMING_SNAKE_CASE :int ) -> List[Any]: '''simple docstring''' return self.function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) class A__ : __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__( self :Any , SCREAMING_SNAKE_CASE :int = 1 ) -> None: '''simple docstring''' _a : Any =dim _a : List[Any] ={k: dim * self.args_dim[k] for k in self.args_dim} def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]: '''simple docstring''' if self.dim == 1: return self.distribution_class(*SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , ) -> Distribution: '''simple docstring''' _a : str =self._base_distribution(SCREAMING_SNAKE_CASE ) if loc is None and scale is None: return distr else: return AffineTransformed(SCREAMING_SNAKE_CASE , loc=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , event_dim=self.event_dim ) @property def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' return () if self.dim == 1 else (self.dim,) @property def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return len(self.event_shape ) @property def __UpperCAmelCase ( self :Any ) -> float: '''simple docstring''' return 0.0 def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> 
nn.Module: '''simple docstring''' return ParameterProjection( in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def __UpperCAmelCase ( self :int , *SCREAMING_SNAKE_CASE :torch.Tensor ) -> Any: '''simple docstring''' raise NotImplementedError() @staticmethod def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor: '''simple docstring''' return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0 class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def __UpperCAmelCase ( cls :int , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Union[str, Any]: '''simple docstring''' _a : Tuple =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) _a : Optional[Any] =2.0 + cls.squareplus(SCREAMING_SNAKE_CASE ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Dict: '''simple docstring''' _a : List[str] =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Optional[int]: '''simple docstring''' _a : int =cls.squareplus(SCREAMING_SNAKE_CASE ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Distribution: '''simple docstring''' _a , _a : Any =distr_args if self.dim == 1: return self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None ) -> Distribution: '''simple docstring''' _a , _a : Optional[int] =distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
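A standalone check of the squareplus map used by the classes above, (x + sqrt(x^2 + 4)) / 2: a smooth softplus-like transform that keeps scale and df parameters strictly positive (torch only; none of the mangled classes are needed).

import torch

x = torch.tensor([-3.0, 0.0, 3.0])
y = (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
assert bool((y > 0).all())                     # positive even for negative inputs
assert torch.isclose(y[1], torch.tensor(1.0))  # squareplus(0) == 1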
694
1
'''simple docstring''' import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]: with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(_UpperCAmelCase ): requests.request("""GET""" ,"""https://huggingface.co""" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 ) @pytest.mark.integration def SCREAMING_SNAKE_CASE_ ( ) -> int: with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("""GET""" ,"""https://huggingface.co""" ) def SCREAMING_SNAKE_CASE_ ( ) -> List[str]: with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(_UpperCAmelCase ): http_head("""https://huggingface.co""" )
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int: return number | (1 << position) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int: return number & ~(1 << position) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int: return number ^ (1 << position) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> bool: return ((number >> position) & 1) == 1 def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int: return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
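Worked examples for the five bit helpers above (expressions inlined, since the dump renames every function to the same identifier):

assert 0b1101 | (1 << 1) == 0b1111          # set bit 1
assert 0b1111 & ~(1 << 1) == 0b1101         # clear bit 1
assert 0b1101 ^ (1 << 1) == 0b1111          # flip bit 1
assert ((0b1010 >> 3) & 1) == 1             # bit 3 is set
assert int((0b1010 & (1 << 2)) != 0) == 0   # bit 2 is not set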
694
1
'''simple docstring''' import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class A__ ( unittest.TestCase ): def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :List[Any]=1_8 , SCREAMING_SNAKE_CASE :Tuple=3_0 , SCREAMING_SNAKE_CASE :Optional[Any]=4_0_0 , SCREAMING_SNAKE_CASE :List[Any]=True , SCREAMING_SNAKE_CASE :Optional[Any]=None , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :int=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE :Optional[int]=[0.5, 0.5, 0.5] , ) -> str: '''simple docstring''' _a : Tuple =size if size is not None else {"""height""": 1_8, """width""": 1_8} _a : Optional[Any] =parent _a : List[str] =batch_size _a : int =num_channels _a : Union[str, Any] =image_size _a : List[Any] =min_resolution _a : Optional[Any] =max_resolution _a : Dict =do_resize _a : Optional[int] =size _a : Union[str, Any] =do_normalize _a : List[str] =image_mean _a : Optional[int] =image_std def __UpperCAmelCase ( self :Dict ) -> List[str]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : List[Any] = DPTImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self :List[Any] ) -> int: '''simple docstring''' _a : Tuple =DPTImageProcessingTester(self ) @property def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' _a : Union[str, Any] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """image_mean""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """image_std""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) ) def __UpperCAmelCase ( self :str ) -> Tuple: '''simple docstring''' _a : List[Any] =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} ) _a : str =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} ) def __UpperCAmelCase ( self :int ) -> List[Any]: '''simple docstring''' # Initialize image_processing _a : Tuple =self.image_processing_class(**self.image_processor_dict ) # create random PIL images _a : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _a : Union[str, Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, 
self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _a : Optional[Any] =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' # Initialize image_processing _a : Tuple =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _a : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _a : List[str] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _a : str =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def __UpperCAmelCase ( self :int ) -> Tuple: '''simple docstring''' # Initialize image_processing _a : Dict =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _a : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _a : Optional[int] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _a : Optional[Any] =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list ,_UpperCAmelCase : list ) -> float: _validate_point(_UpperCAmelCase ) _validate_point(_UpperCAmelCase ) if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): raise ValueError("""Both points must be in the same n-dimensional space""" ) return float(sum(abs(a - b ) for a, b in zip(_UpperCAmelCase ,_UpperCAmelCase ) ) ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[float] ) -> None: if point: if isinstance(_UpperCAmelCase ,_UpperCAmelCase ): for item in point: if not isinstance(_UpperCAmelCase ,(int, float) ): _a : str =( """Expected a list of numbers as input, found """ F"{type(_UpperCAmelCase ).__name__}" ) raise TypeError(_UpperCAmelCase ) else: _a : List[Any] =F"Expected a list of numbers as input, found {type(_UpperCAmelCase ).__name__}" raise TypeError(_UpperCAmelCase ) else: raise ValueError("""Missing an input""" ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list ,_UpperCAmelCase : list ) -> float: _validate_point(_UpperCAmelCase ) _validate_point(_UpperCAmelCase ) if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): raise ValueError("""Both points must be in the same n-dimensional space""" ) return float(sum(abs(x - y ) for x, y in zip(_UpperCAmelCase ,_UpperCAmelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
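A worked example for the Manhattan (taxicab) distance above, which sums coordinate-wise absolute differences:

assert sum(abs(a - b) for a, b in zip([1, 1], [3, 4])) == abs(1 - 3) + abs(1 - 4) == 5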
694
1
'''simple docstring''' import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class A__ : def __init__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=9_9 , SCREAMING_SNAKE_CASE :Optional[int]=1_3 , SCREAMING_SNAKE_CASE :Union[str, Any]=7 , SCREAMING_SNAKE_CASE :str=9 , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=False , SCREAMING_SNAKE_CASE :Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE :List[Any]=5 , SCREAMING_SNAKE_CASE :Tuple=4 , SCREAMING_SNAKE_CASE :str=3_7 , SCREAMING_SNAKE_CASE :Optional[int]=8 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.002 , SCREAMING_SNAKE_CASE :List[Any]=1 , SCREAMING_SNAKE_CASE :str=0 , SCREAMING_SNAKE_CASE :Union[str, Any]=0 , SCREAMING_SNAKE_CASE :List[Any]=None , SCREAMING_SNAKE_CASE :Tuple=None , ) -> int: '''simple docstring''' _a : Optional[Any] =parent _a : Optional[int] =batch_size _a : Union[str, Any] =encoder_seq_length _a : int =decoder_seq_length # For common tests _a : Dict =self.decoder_seq_length _a : Any =is_training _a : List[str] =use_attention_mask _a : Dict =use_labels _a : Any =vocab_size _a : Optional[int] =hidden_size _a : List[str] =num_hidden_layers _a : str =num_attention_heads _a : Optional[Any] =d_ff _a : Any =relative_attention_num_buckets _a : Any =dropout_rate _a : str =initializer_factor _a : str =eos_token_id _a : Any =pad_token_id _a : Tuple =decoder_start_token_id _a : Optional[Any] =None _a : List[str] =decoder_layers def __UpperCAmelCase ( self :List[Any] ) -> Optional[Any]: '''simple docstring''' return TaConfig.from_pretrained("""google/umt5-base""" ) def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Union[str, Any]=None , SCREAMING_SNAKE_CASE :List[Any]=None , ) -> int: '''simple docstring''' if attention_mask is None: _a : Dict =input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _a : List[Any] =decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _a : Optional[int] =torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE ) if decoder_head_mask is None: _a : Any =torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE ) if cross_attn_head_mask is None: _a : List[str] =torch.ones( config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __UpperCAmelCase ( self :List[str] ) -> Union[str, Any]: '''simple docstring''' _a : Union[str, Any] =ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) 
_a : List[Any] =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in an incorrect seq_length, which in turn results in # position_ids being off by num_pad_tokens in past input _a : int =input_ids.clamp(self.pad_token_id + 1 ) _a : Union[str, Any] =decoder_input_ids.clamp(self.pad_token_id + 1 ) _a : Any =self.get_config() _a : Union[str, Any] =config.num_attention_heads _a : Any =self.prepare_inputs_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return config, input_dict def __UpperCAmelCase ( self :Any ) -> Any: '''simple docstring''' _a , _a : List[str] =self.prepare_config_and_inputs() return config, inputs_dict def __UpperCAmelCase ( self :str ) -> Any: '''simple docstring''' return TaConfig( vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __UpperCAmelCase ( self :Union[str, Any] ) -> List[Any]: '''simple docstring''' return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Optional[int] , ) -> str: '''simple docstring''' _a : Optional[Any] =UMTaModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : Union[str, Any] =model( input_ids=SCREAMING_SNAKE_CASE , decoder_input_ids=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , decoder_attention_mask=SCREAMING_SNAKE_CASE , ) _a : Optional[Any] =model(input_ids=SCREAMING_SNAKE_CASE , decoder_input_ids=SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =result.last_hidden_state _a : Optional[Any] =result.past_key_values _a : List[str] =result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(SCREAMING_SNAKE_CASE ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key
# and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :List[Any] , ) -> Optional[int]: '''simple docstring''' _a : Optional[int] =UMTaModel(config=SCREAMING_SNAKE_CASE ).get_decoder().to(SCREAMING_SNAKE_CASE ).eval() # first forward pass _a : Optional[Any] =model(SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE ) _a : Dict =model(SCREAMING_SNAKE_CASE ) _a : Tuple =model(SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) + 1 ) _a , _a : Union[str, Any] =outputs.to_tuple() # create hypothetical next token and extend to next_input_ids _a : int =ids_tensor((self.batch_size, 1) , config.vocab_size ) # append next_tokens to input_ids _a : Optional[Any] =torch.cat([input_ids, next_tokens] , dim=-1 ) _a : Optional[int] =model(SCREAMING_SNAKE_CASE )["""last_hidden_state"""] _a : str =model(SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE )["""last_hidden_state"""] # select random slice _a : Union[str, Any] =ids_tensor((1,) , output_from_past.shape[-1] ).item() _a : str =output_from_no_past[:, -1, random_slice_idx].detach() _a : str =output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any] , ) -> Union[str, Any]: '''simple docstring''' _a : List[str] =UMTaModel(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).half().eval() _a : Tuple =model(**SCREAMING_SNAKE_CASE )["""last_hidden_state"""] self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE ).any().item() ) @require_torch class A__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : str = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) __UpperCamelCase : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () __UpperCamelCase : Optional[Any] = ( { "conversational": UMTaForConditionalGeneration, "feature-extraction": UMTaModel, "summarization": UMTaForConditionalGeneration, "text2text-generation": UMTaForConditionalGeneration, "translation": UMTaForConditionalGeneration, "question-answering": UMTaForQuestionAnswering, } if is_torch_available() else {} ) __UpperCamelCase : str = True __UpperCamelCase : List[Any] = False __UpperCamelCase : Tuple = False __UpperCamelCase : Union[str, Any] = True __UpperCamelCase : Dict = True # The small UMT5 model needs higher percentages for CPU/MP tests __UpperCamelCase : Union[str, Any] = [0.8, 0.9] def __UpperCAmelCase ( self :Any ) -> str: '''simple docstring''' _a : Dict =UMTaModelTester(self ) @unittest.skip("""Test has a segmentation fault on torch 1.8.0""" ) def __UpperCAmelCase ( self :Optional[int] ) -> int: '''simple docstring''' _a : Optional[Any] =self.model_tester.prepare_config_and_inputs() _a : str =UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( SCREAMING_SNAKE_CASE , (config_and_inputs[1],
config_and_inputs[3], config_and_inputs[2]) , f"{tmpdirname}/t5_test.onnx" , export_params=SCREAMING_SNAKE_CASE , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , ) @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" ) def __UpperCAmelCase ( self :Tuple ) -> Any: '''simple docstring''' _a : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Any ) -> Any: '''simple docstring''' _a : Any =["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""] _a : Dict =self.model_tester.prepare_config_and_inputs() _a : Optional[int] =config_and_inputs[0] _a : int =UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE ).eval() model.to(SCREAMING_SNAKE_CASE ) _a : str ={ """head_mask""": torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE ), """decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE ), """cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE ), } for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE , head_masking.items() ): _a : Optional[Any] ={name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": _a : Union[str, Any] =torch.ones( config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE ) _a : List[Any] =model.generate( config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE , return_dict_in_generate=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # We check the state of decoder_attentions and cross_attentions just from the last step _a : int =out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" ) def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]: '''simple docstring''' pass @require_torch @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase ): @slow @unittest.skip( """Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" ) def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' _a : Any =UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) _a : Optional[Any] =AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=SCREAMING_SNAKE_CASE , legacy=SCREAMING_SNAKE_CASE ) _a : Tuple =[ """Bonjour monsieur <extra_id_0> bien <extra_id_1>.""", """No se como puedo <extra_id_0>.""", """This is the reason why we <extra_id_0> them.""", """The <extra_id_0> walks in <extra_id_1>, seats""", """A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""", ] _a : Union[str, Any] =tokenizer(SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE ).input_ids # fmt: off _a : Dict =torch.tensor( [ [ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1], ] ) # fmt: on torch.testing.assert_allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Dict =model.generate(input_ids.to(SCREAMING_SNAKE_CASE ) ) _a : Optional[Any] =[ """<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""", """<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", ] _a : Optional[int] =tokenizer.batch_decode(SCREAMING_SNAKE_CASE ) self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
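# A minimal standalone sketch of the cache-equivalence pattern the tester above
# exercises: run a model once over the full sequence, then over the new token
# only, and check the last-step outputs agree. The toy per-token model and all
# names below are illustrative assumptions; the real test additionally threads
# past_key_values through the UMT5 decoder.
import torch

torch.manual_seed(0)
embed = torch.nn.Embedding(100, 16)
proj = torch.nn.Linear(16, 16)

def toy_step(token_ids):
    # position-independent toy "decoder": per-token transform only, so the
    # full pass and the incremental pass must match exactly
    return proj(embed(token_ids))

full_ids = torch.randint(0, 100, (2, 5))
full_out = toy_step(full_ids)            # one pass over all 5 tokens
last_out = toy_step(full_ids[:, -1:])    # "cached" pass over the new token only
assert torch.allclose(full_out[:, -1:], last_out, atol=1e-6)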
694
'''simple docstring''' from __future__ import annotations class A__ : def __init__( self :str , SCREAMING_SNAKE_CASE :int ) -> None: '''simple docstring''' _a : int =order # a_{0} ... a_{k} _a : Optional[Any] =[1.0] + [0.0] * order # b_{0} ... b_{k} _a : Tuple =[1.0] + [0.0] * order # x[n-1] ... x[n-k] _a : List[Any] =[0.0] * self.order # y[n-1] ... y[n-k] _a : Tuple =[0.0] * self.order def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :list[float] , SCREAMING_SNAKE_CASE :list[float] ) -> None: '''simple docstring''' if len(SCREAMING_SNAKE_CASE ) < self.order: _a : int =[1.0, *a_coeffs] if len(SCREAMING_SNAKE_CASE ) != self.order + 1: _a : int =( f"Expected a_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}" ) raise ValueError(SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) != self.order + 1: _a : Optional[Any] =( f"Expected b_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}" ) raise ValueError(SCREAMING_SNAKE_CASE ) _a : List[str] =a_coeffs _a : Union[str, Any] =b_coeffs def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :float ) -> float: '''simple docstring''' _a : str =0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) _a : Any =(result + self.b_coeffs[0] * sample) / self.a_coeffs[0] _a : str =self.input_history[:-1] _a : Optional[Any] =self.output_history[:-1] _a : Optional[int] =sample _a : Tuple =result return result
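# A minimal standalone sketch of the IIR difference equation the class above
# implements, y[n] = (b0*x[n] + sum_i b_i*x[n-i] - sum_i a_i*y[n-i]) / a0,
# written with readable (assumed) names since the row's identifiers follow the
# dataset's obfuscation convention. Coefficient values are illustrative.
def iir_process(samples, a_coeffs, b_coeffs):
    order = len(a_coeffs) - 1
    x_hist = [0.0] * order  # x[n-1] ... x[n-k]
    y_hist = [0.0] * order  # y[n-1] ... y[n-k]
    out = []
    for x in samples:
        acc = sum(
            b_coeffs[i] * x_hist[i - 1] - a_coeffs[i] * y_hist[i - 1]
            for i in range(1, order + 1)
        )
        y = (acc + b_coeffs[0] * x) / a_coeffs[0]
        x_hist = [x] + x_hist[:-1]
        y_hist = [y] + y_hist[:-1]
        out.append(y)
    return out

# identity filter (a = b = [1, 0, 0]) leaves the signal unchanged
assert iir_process([1.0, 2.0, 3.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]) == [1.0, 2.0, 3.0]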
694
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig A__: List[str] = logging.get_logger(__name__) A__: Any = { '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''', # See all DPT models at https://huggingface.co/models?filter=dpt } class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Any = "dpt" def __init__( self :Tuple , SCREAMING_SNAKE_CASE :int=7_6_8 , SCREAMING_SNAKE_CASE :Optional[int]=1_2 , SCREAMING_SNAKE_CASE :Dict=1_2 , SCREAMING_SNAKE_CASE :List[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE :Any="gelu" , SCREAMING_SNAKE_CASE :int=0.0 , SCREAMING_SNAKE_CASE :str=0.0 , SCREAMING_SNAKE_CASE :List[str]=0.02 , SCREAMING_SNAKE_CASE :Union[str, Any]=1e-12 , SCREAMING_SNAKE_CASE :Any=3_8_4 , SCREAMING_SNAKE_CASE :Any=1_6 , SCREAMING_SNAKE_CASE :List[Any]=3 , SCREAMING_SNAKE_CASE :Dict=False , SCREAMING_SNAKE_CASE :List[Any]=True , SCREAMING_SNAKE_CASE :str=[2, 5, 8, 1_1] , SCREAMING_SNAKE_CASE :List[str]="project" , SCREAMING_SNAKE_CASE :str=[4, 2, 1, 0.5] , SCREAMING_SNAKE_CASE :Optional[Any]=[9_6, 1_9_2, 3_8_4, 7_6_8] , SCREAMING_SNAKE_CASE :Tuple=2_5_6 , SCREAMING_SNAKE_CASE :List[str]=-1 , SCREAMING_SNAKE_CASE :Optional[int]=False , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :str=0.4 , SCREAMING_SNAKE_CASE :str=2_5_5 , SCREAMING_SNAKE_CASE :int=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=[1, 1_0_2_4, 2_4, 2_4] , SCREAMING_SNAKE_CASE :Tuple=[0, 1] , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :List[str] , ) -> Optional[int]: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =hidden_size _a : Optional[int] =is_hybrid if self.is_hybrid: if backbone_config is None: logger.info("""Initializing the config with a `BiT` backbone.""" ) _a : Dict ={ """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, } _a : Dict =BitConfig(**SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): logger.info("""Initializing the config with a `BiT` backbone.""" ) _a : Tuple =BitConfig(**SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _a : Dict =backbone_config else: raise ValueError( f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}." 
) _a : str =backbone_featmap_shape _a : Tuple =neck_ignore_stages if readout_type != "project": raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" ) else: _a : int =None _a : str =None _a : Optional[Any] =[] _a : Dict =num_hidden_layers _a : Any =num_attention_heads _a : Dict =intermediate_size _a : List[Any] =hidden_act _a : Dict =hidden_dropout_prob _a : List[Any] =attention_probs_dropout_prob _a : Union[str, Any] =initializer_range _a : Any =layer_norm_eps _a : Tuple =image_size _a : List[str] =patch_size _a : Dict =num_channels _a : str =qkv_bias _a : List[Any] =backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" ) _a : List[Any] =readout_type _a : str =reassemble_factors _a : Optional[Any] =neck_hidden_sizes _a : int =fusion_hidden_size _a : Dict =head_in_index _a : int =use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) _a : Any =use_auxiliary_head _a : int =auxiliary_loss_weight _a : List[Any] =semantic_loss_ignore_index _a : Any =semantic_classifier_dropout def __UpperCAmelCase ( self :List[str] ) -> Dict: '''simple docstring''' _a : Any =copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: _a : Union[str, Any] =self.backbone_config.to_dict() _a : Optional[int] =self.__class__.model_type return output
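# For reference, the upstream transformers class this row mirrors can be used
# directly; a minimal sketch (assumes `transformers` is installed):
from transformers import DPTConfig

config = DPTConfig()                     # plain (non-hybrid) ViT backbone defaults
assert config.readout_type == "project"  # default, per the signature above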
694
'''simple docstring''' from queue import PriorityQueue from typing import Any import numpy as np def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : dict ,_UpperCAmelCase : str ,_UpperCAmelCase : set ,_UpperCAmelCase : set ,_UpperCAmelCase : dict ,_UpperCAmelCase : dict ,_UpperCAmelCase : PriorityQueue ,_UpperCAmelCase : dict ,_UpperCAmelCase : float | int ,) -> float | int: for nxt, d in graph[v]: if nxt in visited_forward: continue _a : Dict =cst_fwd.get(_UpperCAmelCase ,np.inf ) _a : int =cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) _a : Tuple =new_cost_f _a : Optional[Any] =v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: _a : str =cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ,_UpperCAmelCase : dict ,_UpperCAmelCase : dict ) -> int: _a : Optional[Any] =-1 _a : List[str] =set() _a : Optional[int] =set() _a : Optional[int] ={source: 0} _a : List[str] ={destination: 0} _a : Union[str, Any] ={source: None} _a : Dict ={destination: None} _a : PriorityQueue[Any] =PriorityQueue() _a : PriorityQueue[Any] =PriorityQueue() _a : Optional[int] =np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): _a , _a : str =queue_forward.get() visited_forward.add(_UpperCAmelCase ) _a , _a : List[Any] =queue_backward.get() visited_backward.add(_UpperCAmelCase ) _a : int =pass_and_relaxation( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,) _a : Any =pass_and_relaxation( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: _a : Any =shortest_distance return shortest_path_distance A__: Union[str, Any] = { '''B''': [['''C''', 1]], '''C''': [['''D''', 1]], '''D''': [['''F''', 1]], '''E''': [['''B''', 1], ['''G''', 2]], '''F''': [], '''G''': [['''F''', 1]], } A__: str = { '''B''': [['''E''', 1]], '''C''': [['''B''', 1]], '''D''': [['''C''', 1]], '''F''': [['''D''', 1], ['''G''', 1]], '''E''': [[None, np.inf]], '''G''': [['''E''', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
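# The bidirectional search above alternates forward and backward expansions and
# stops once cst_fwd[v_fwd] + cst_bwd[v_bwd] can no longer beat the best meeting
# cost. As a correctness reference, a plain single-direction Dijkstra over the
# same example graph (function and variable names are assumed):
from heapq import heappop, heappush

def dijkstra(graph, src):
    dist, heap = {src: 0}, [(0, src)]
    while heap:
        d, v = heappop(heap)
        if d > dist.get(v, float("inf")):
            continue  # stale heap entry
        for nxt, w in graph.get(v, []):
            if d + w < dist.get(nxt, float("inf")):
                dist[nxt] = d + w
                heappush(heap, (d + w, nxt))
    return dist

g = {"E": [("B", 1), ("G", 2)], "B": [("C", 1)], "C": [("D", 1)],
     "D": [("F", 1)], "G": [("F", 1)]}
assert dijkstra(g, "E")["F"] == 3  # E -> G -> F, matching the bidirectional result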
694
1
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
694
'''simple docstring''' from math import factorial def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 100 ) -> int: return sum(map(_UpperCAmelCase ,str(factorial(_UpperCAmelCase ) ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
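# A standalone sketch of the same Project Euler 20 computation with readable
# (assumed) names, since the row above uses the dataset's obfuscated identifiers:
from math import factorial

def factorial_digit_sum(n: int = 100) -> int:
    return sum(int(digit) for digit in str(factorial(n)))

assert factorial_digit_sum(10) == 27  # 10! = 3628800 -> 3 + 6 + 2 + 8 + 8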
694
1
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> bool: if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): raise ValueError("""check_bouncy() accepts only integer arguments""" ) _a : Optional[Any] =str(_UpperCAmelCase ) _a : int ="""""".join(sorted(_UpperCAmelCase ) ) return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : float = 99 ) -> int: if not 0 < percent < 100: raise ValueError("""solution() only accepts values from 0 to 100""" ) _a : str =0 _a : Optional[Any] =1 while True: if check_bouncy(_UpperCAmelCase ): bouncy_num += 1 if (bouncy_num / num) * 100 >= percent: return num num += 1 if __name__ == "__main__": from doctest import testmod testmod() print(F"{solution(99)}")
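# A standalone sketch of the bouncy-number check above (Project Euler 112):
# a number is bouncy when its digits are neither monotonically increasing nor
# monotonically decreasing. Names are illustrative assumptions.
def is_bouncy(n: int) -> bool:
    digits = str(n)
    ascending = "".join(sorted(digits))
    return digits != ascending and digits != ascending[::-1]

assert not is_bouncy(1234) and not is_bouncy(4321) and is_bouncy(155349)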
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ) -> list: _a : Tuple =len(_UpperCAmelCase ) _a : str =[] for i in range(len(_UpperCAmelCase ) - pat_len + 1 ): _a : int =True for j in range(_UpperCAmelCase ): if s[i + j] != pattern[j]: _a : int =False break if match_found: position.append(_UpperCAmelCase ) return position if __name__ == "__main__": assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3] print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
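# The naive scan above is O(len(s) * len(pattern)); as a quick cross-check, the
# same positions can be collected with str.find in a loop (names assumed):
def find_all(s: str, pattern: str) -> list[int]:
    positions, i = [], s.find(pattern)
    while i != -1:
        positions.append(i)
        i = s.find(pattern, i + 1)
    return positions

assert find_all("ABAAABCDBBABCDDEBCABC", "ABC") == [4, 10, 18]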
694
1
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar A__: Any = TypeVar('''KEY''') A__: int = TypeVar('''VAL''') @dataclass(frozen=UpperCAmelCase__ , slots=UpperCAmelCase__ ) class A__ ( Generic[KEY, VAL] ): __UpperCamelCase : KEY __UpperCamelCase : VAL class A__ ( _Item ): def __init__( self :Dict ) -> None: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __bool__( self :Optional[int] ) -> bool: '''simple docstring''' return False A__: Optional[Any] = _DeletedItem() class A__ ( MutableMapping[KEY, VAL] ): def __init__( self :Tuple , SCREAMING_SNAKE_CASE :int = 8 , SCREAMING_SNAKE_CASE :float = 0.75 ) -> None: '''simple docstring''' _a : List[str] =initial_block_size _a : list[_Item | None] =[None] * initial_block_size assert 0.0 < capacity_factor < 1.0 _a : str =capacity_factor _a : Tuple =0 def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :KEY ) -> int: '''simple docstring''' return hash(SCREAMING_SNAKE_CASE ) % len(self._buckets ) def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :int ) -> int: '''simple docstring''' return (ind + 1) % len(self._buckets ) def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :KEY , SCREAMING_SNAKE_CASE :VAL ) -> bool: '''simple docstring''' _a : Optional[int] =self._buckets[ind] if not stored: _a : List[str] =_Item(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self._len += 1 return True elif stored.key == key: _a : Optional[int] =_Item(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return True else: return False def __UpperCAmelCase ( self :str ) -> bool: '''simple docstring''' _a : int =len(self._buckets ) * self._capacity_factor return len(self ) >= int(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Union[str, Any] ) -> bool: '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False _a : List[Any] =len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> None: '''simple docstring''' _a : Any =self._buckets _a : int =[None] * new_size _a : Union[str, Any] =0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def __UpperCAmelCase ( self :Tuple ) -> None: '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def __UpperCAmelCase ( self :Tuple ) -> None: '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :KEY ) -> Iterator[int]: '''simple docstring''' _a : Tuple =self._get_bucket_index(SCREAMING_SNAKE_CASE ) for _ in range(len(self._buckets ) ): yield ind _a : str =self._get_next_ind(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :KEY , SCREAMING_SNAKE_CASE :VAL ) -> None: '''simple docstring''' for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE ): if self._try_set(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): break def __setitem__( self :Optional[int] , SCREAMING_SNAKE_CASE :KEY , SCREAMING_SNAKE_CASE :VAL ) -> None: '''simple docstring''' if self._is_full(): self._size_up() self._add_item(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __delitem__( self :List[str] , SCREAMING_SNAKE_CASE :KEY ) -> None: '''simple docstring''' for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE ): _a : str =self._buckets[ind] if item is None: raise KeyError(SCREAMING_SNAKE_CASE ) if item is 
_deleted: continue if item.key == key: _a : Dict =_deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self :Optional[int] , SCREAMING_SNAKE_CASE :KEY ) -> VAL: '''simple docstring''' for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE ): _a : Optional[Any] =self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(SCREAMING_SNAKE_CASE ) def __len__( self :Optional[int] ) -> int: '''simple docstring''' return self._len def __iter__( self :Union[str, Any] ) -> Iterator[KEY]: '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self :Tuple ) -> str: '''simple docstring''' _a : List[Any] =""" ,""".join( f"{item.key}: {item.val}" for item in self._buckets if item ) return f"HashMap({val_string})"
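# A minimal standalone sketch of the linear-probing idea behind the map above:
# hash to a bucket, then scan forward (wrapping around) past occupied slots.
# The fixed capacity and names are illustrative assumptions; the class above
# adds tombstones and automatic resizing on top of this.
def probe_insert(buckets, key, val):
    n = len(buckets)
    ind = hash(key) % n
    for _ in range(n):
        if buckets[ind] is None or buckets[ind][0] == key:
            buckets[ind] = (key, val)
            return
        ind = (ind + 1) % n  # next slot, wrapping around
    raise RuntimeError("table full")

table = [None] * 8
probe_insert(table, "a", 1)
probe_insert(table, "b", 2)
probe_insert(table, "a", 3)  # overwrites the existing key
assert ("a", 3) in table and ("b", 2) in table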
694
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class A__ ( unittest.TestCase ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=1_8 , SCREAMING_SNAKE_CASE :Any=3_0 , SCREAMING_SNAKE_CASE :List[str]=4_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , ) -> Tuple: '''simple docstring''' _a : int =size if size is not None else {"""height""": 1_8, """width""": 1_8} _a : int =parent _a : Optional[int] =batch_size _a : List[str] =num_channels _a : Optional[Any] =image_size _a : int =min_resolution _a : str =max_resolution _a : str =do_resize _a : Tuple =size _a : Tuple =do_normalize def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804], [-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self :List[Any] ) -> List[Any]: '''simple docstring''' _a : Any =ImageGPTImageProcessingTester(self ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self :Dict ) -> Any: '''simple docstring''' _a : Optional[Any] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) ) def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]: '''simple docstring''' _a : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} ) _a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} ) def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) _a : Dict =json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) ) else: self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' _a : List[Any] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : Any 
=os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" ) image_processor_first.to_json_file(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict() _a : Tuple =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> str: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict() _a : Union[str, Any] =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: _a : Any =load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" ) _a : Dict =Image.open(dataset[4]["""file"""] ) _a : Optional[int] =Image.open(dataset[5]["""file"""] ) _a : Optional[Any] =[imagea, imagea] return images @require_vision @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : Optional[Any] =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) _a : int =prepare_images() # test non-batched _a : Dict =image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) ) _a : Optional[int] =[3_0_6, 1_9_1, 1_9_1] self.assertEqual(encoding.input_ids[0, :3].tolist() , SCREAMING_SNAKE_CASE ) # test batched _a : Dict =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) ) _a : Any =[3_0_3, 1_3, 1_3] self.assertEqual(encoding.input_ids[1, -3:].tolist() , SCREAMING_SNAKE_CASE )
694
1
'''simple docstring''' from __future__ import annotations A__: List[str] = [] def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list[int]] ,_UpperCAmelCase : int ,_UpperCAmelCase : int ) -> bool: for i in range(len(_UpperCAmelCase ) ): if board[row][i] == 1: return False for i in range(len(_UpperCAmelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(_UpperCAmelCase ,-1 ,-1 ) ,range(_UpperCAmelCase ,-1 ,-1 ) ): if board[i][j] == 1: return False for i, j in zip(range(_UpperCAmelCase ,-1 ,-1 ) ,range(_UpperCAmelCase ,len(_UpperCAmelCase ) ) ): if board[i][j] == 1: return False return True def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list[int]] ,_UpperCAmelCase : int ) -> bool: if row >= len(_UpperCAmelCase ): solution.append(_UpperCAmelCase ) printboard(_UpperCAmelCase ) print() return True for i in range(len(_UpperCAmelCase ) ): if is_safe(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ): _a : Union[str, Any] =1 solve(_UpperCAmelCase ,row + 1 ) _a : int =0 return False def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list[int]] ) -> None: for i in range(len(_UpperCAmelCase ) ): for j in range(len(_UpperCAmelCase ) ): if board[i][j] == 1: print("""Q""" ,end=""" """ ) else: print(""".""" ,end=""" """ ) print() # n=int(input("The no. of queens")) A__: List[Any] = 8 A__: List[Any] = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('''The total no. of solutions are :''', len(solution))
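# A compact standalone cross-check for the backtracking solver above: place one
# queen per row/column via permutations and keep placements whose diagonals are
# all distinct. For n = 8 this counts the well-known 92 solutions.
from itertools import permutations

def count_n_queens(n: int) -> int:
    return sum(
        1
        for cols in permutations(range(n))
        if len({r + c for r, c in enumerate(cols)}) == n
        and len({r - c for r, c in enumerate(cols)}) == n
    )

assert count_n_queens(8) == 92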
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[int] ,_UpperCAmelCase : int ) -> bool: _a : Optional[int] =len(_UpperCAmelCase ) _a : Tuple =[[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): _a : Any =True # sum is not zero and set is empty then false for i in range(1 ,required_sum + 1 ): _a : int =False for i in range(1 ,arr_len + 1 ): for j in range(1 ,required_sum + 1 ): if arr[i - 1] > j: _a : Optional[Any] =subset[i - 1][j] if arr[i - 1] <= j: _a : Union[str, Any] =subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
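# A standalone sketch of the same decision problem using a reachable-sums set
# instead of the 2-D boolean table above; names are illustrative assumptions.
def has_subset_sum(nums: list[int], target: int) -> bool:
    reachable = {0}
    for x in nums:
        reachable |= {s + x for s in reachable if s + x <= target}
    return target in reachable

assert has_subset_sum([3, 34, 4, 12, 5, 2], 9)       # 4 + 5
assert not has_subset_sum([3, 34, 4, 12, 5, 2], 30)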
694
1
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 1000 ) -> int: _a , _a : Dict =1, 1 _a : Optional[int] =2 while True: _a : List[Any] =0 _a : Tuple =fa + fa _a , _a : Any =fa, f index += 1 for _ in str(_UpperCAmelCase ): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
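# The loop above can be cross-checked with Binet's approximation: F(k) has
# about k*log10(phi) - log10(5)/2 + 1 digits, so the first index whose
# Fibonacci number reaches n digits is ceil((n - 1 + log10(5)/2) / log10(phi)).
# A sketch of that closed form (name assumed):
from math import ceil, log10

def first_fib_index_with_digits(n: int) -> int:
    phi = (1 + 5 ** 0.5) / 2
    return ceil((n - 1 + log10(5) / 2) / log10(phi))

assert first_fib_index_with_digits(3) == 12      # F(12) = 144
assert first_fib_index_with_digits(1000) == 4782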
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 4000000 ) -> int: _a : Optional[Any] =[] _a , _a : Union[str, Any] =0, 1 while b <= n: if b % 2 == 0: even_fibs.append(_UpperCAmelCase ) _a , _a : Optional[Any] =b, a + b return sum(_UpperCAmelCase ) if __name__ == "__main__": print(F"{solution() = }")
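# A standalone sketch exploiting that every third Fibonacci number is even, so
# the even terms satisfy E(k) = 4*E(k-1) + E(k-2) with E = 2, 8, 34, ...
# (the function name is an illustrative assumption):
def even_fib_sum(limit: int = 4_000_000) -> int:
    a, b, total = 2, 8, 0
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total

assert even_fib_sum(100) == 2 + 8 + 34  # 44
assert even_fib_sum() == 4_613_732      # the classic Project Euler 2 answer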
694
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__: List[Any] = logging.get_logger(__name__) A__: List[str] = { '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''', # See all CANINE models at https://huggingface.co/models?filter=canine } class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict = "canine" def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any]=7_6_8 , SCREAMING_SNAKE_CASE :Tuple=1_2 , SCREAMING_SNAKE_CASE :List[str]=1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE :Dict="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Any=0.1 , SCREAMING_SNAKE_CASE :List[Any]=1_6_3_8_4 , SCREAMING_SNAKE_CASE :Optional[int]=1_6 , SCREAMING_SNAKE_CASE :str=0.02 , SCREAMING_SNAKE_CASE :int=1e-12 , SCREAMING_SNAKE_CASE :Union[str, Any]=0 , SCREAMING_SNAKE_CASE :Union[str, Any]=0Xe0_00 , SCREAMING_SNAKE_CASE :Optional[int]=0Xe0_01 , SCREAMING_SNAKE_CASE :Union[str, Any]=4 , SCREAMING_SNAKE_CASE :Union[str, Any]=4 , SCREAMING_SNAKE_CASE :List[Any]=8 , SCREAMING_SNAKE_CASE :List[Any]=1_6_3_8_4 , SCREAMING_SNAKE_CASE :Any=1_2_8 , **SCREAMING_SNAKE_CASE :Optional[Any] , ) -> Dict: '''simple docstring''' super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) _a : str =max_position_embeddings _a : List[Any] =hidden_size _a : Optional[int] =num_hidden_layers _a : Any =num_attention_heads _a : List[Any] =intermediate_size _a : Tuple =hidden_act _a : List[str] =hidden_dropout_prob _a : Union[str, Any] =attention_probs_dropout_prob _a : Optional[Any] =initializer_range _a : Optional[int] =type_vocab_size _a : Any =layer_norm_eps # Character config: _a : Dict =downsampling_rate _a : Optional[Any] =upsampling_kernel_size _a : str =num_hash_functions _a : Optional[Any] =num_hash_buckets _a : Any =local_transformer_stride
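# For reference, the upstream transformers class this row mirrors; a minimal
# sketch (assumes `transformers` is installed):
from transformers import CanineConfig

config = CanineConfig()
# character-level defaults, per the signature above
assert config.downsampling_rate == 4 and config.num_hash_functions == 8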
694
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int: if n == 1 or not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): return 0 elif n == 2: return 1 else: _a : Dict =[0, 1] for i in range(2 ,n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int: _a : Union[str, Any] =0 _a : Optional[Any] =2 while digits < n: index += 1 _a : Optional[int] =len(str(fibonacci(_UpperCAmelCase ) ) ) return index def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 1000 ) -> int: return fibonacci_digits_index(_UpperCAmelCase ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
694
1
'''simple docstring''' import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[str] ,_UpperCAmelCase : str ,_UpperCAmelCase : List[str] ) -> Optional[Any]: if isinstance(_UpperCAmelCase ,torch.Tensor ): return image elif isinstance(_UpperCAmelCase ,PIL.Image.Image ): _a : Tuple =[image] if isinstance(image[0] ,PIL.Image.Image ): _a : int =[np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image] _a : Optional[Any] =np.concatenate(_UpperCAmelCase ,axis=0 ) _a : List[str] =np.array(_UpperCAmelCase ).astype(np.floataa ) / 2_5_5.0 _a : Union[str, Any] =image.transpose(0 ,3 ,1 ,2 ) _a : Any =2.0 * image - 1.0 _a : Union[str, Any] =torch.from_numpy(_UpperCAmelCase ) elif isinstance(image[0] ,torch.Tensor ): _a : Dict =torch.cat(_UpperCAmelCase ,dim=0 ) return image def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Optional[Any]=0.9_9_9_5 ) -> Optional[int]: if not isinstance(_UpperCAmelCase ,np.ndarray ): _a : int =True _a : Dict =va.device _a : Dict =va.cpu().numpy() _a : Optional[Any] =va.cpu().numpy() _a : str =np.sum(va * va / (np.linalg.norm(_UpperCAmelCase ) * np.linalg.norm(_UpperCAmelCase )) ) if np.abs(_UpperCAmelCase ) > DOT_THRESHOLD: _a : Optional[int] =(1 - t) * va + t * va else: _a : Tuple =np.arccos(_UpperCAmelCase ) _a : str =np.sin(_UpperCAmelCase ) _a : int =theta_a * t _a : List[Any] =np.sin(_UpperCAmelCase ) _a : Tuple =np.sin(theta_a - theta_t ) / sin_theta_a _a : List[Any] =sin_theta_t / sin_theta_a _a : str =sa * va + sa * va if inputs_are_torch: _a : Optional[Any] =torch.from_numpy(_UpperCAmelCase ).to(_UpperCAmelCase ) return va def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : int ) -> Any: _a : List[str] =F.normalize(_UpperCAmelCase ,dim=-1 ) _a : Union[str, Any] =F.normalize(_UpperCAmelCase ,dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Union[str, Any] ) -> List[str]: for param in model.parameters(): _a : List[str] =value class A__ ( UpperCAmelCase__ ): def __init__( self :Optional[Any] , SCREAMING_SNAKE_CASE :AutoencoderKL , SCREAMING_SNAKE_CASE :CLIPTextModel , SCREAMING_SNAKE_CASE :CLIPModel , SCREAMING_SNAKE_CASE :CLIPTokenizer , SCREAMING_SNAKE_CASE :UNetaDConditionModel , SCREAMING_SNAKE_CASE :Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , SCREAMING_SNAKE_CASE :CLIPFeatureExtractor , SCREAMING_SNAKE_CASE :Union[str, Any]=None , SCREAMING_SNAKE_CASE :Optional[Any]=None , SCREAMING_SNAKE_CASE :Any=None , ) -> int: '''simple docstring''' super().__init__() self.register_modules( vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , clip_model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , 
coca_model=SCREAMING_SNAKE_CASE , coca_tokenizer=SCREAMING_SNAKE_CASE , coca_transform=SCREAMING_SNAKE_CASE , ) _a : str =( feature_extractor.size if isinstance(feature_extractor.size , SCREAMING_SNAKE_CASE ) else feature_extractor.size["""shortest_edge"""] ) _a : Optional[int] =transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , SCREAMING_SNAKE_CASE ) set_requires_grad(self.clip_model , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Optional[Union[str, int]] = "auto" ) -> Union[str, Any]: '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _a : Tuple =self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Tuple ) -> Optional[int]: '''simple docstring''' self.enable_attention_slicing(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :str ) -> List[Any]: '''simple docstring''' set_requires_grad(self.vae , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' set_requires_grad(self.vae , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :List[Any] ) -> List[str]: '''simple docstring''' set_requires_grad(self.unet , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' set_requires_grad(self.unet , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :int ) -> int: '''simple docstring''' # get the original timestep using init_timestep _a : List[Any] =min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE ) _a : Optional[int] =max(num_inference_steps - init_timestep , 0 ) _a : str =self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[Any]=None ) -> Optional[Any]: '''simple docstring''' if not isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ): raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(SCREAMING_SNAKE_CASE )}" ) _a : Union[str, Any] =image.to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _a : int =[ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(SCREAMING_SNAKE_CASE ) ] _a : Tuple =torch.cat(SCREAMING_SNAKE_CASE , dim=0 ) else: _a : Tuple =self.vae.encode(SCREAMING_SNAKE_CASE ).latent_dist.sample(SCREAMING_SNAKE_CASE ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _a : Union[str, Any] =0.18_215 * init_latents _a : Optional[Any] =init_latents.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 ) _a : Any =randn_tensor(init_latents.shape , generator=SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ) # get latents _a : List[str] =self.scheduler.add_noise(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Any =init_latents return latents def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :List[Any] ) -> List[Any]: '''simple docstring''' _a : Tuple =self.coca_transform(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): _a : 
Optional[int] =self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) _a : Optional[Any] =self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" ) def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Dict ) -> int: '''simple docstring''' _a : Dict =self.feature_extractor.preprocess(SCREAMING_SNAKE_CASE ) _a : str =torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half() _a : Tuple =self.clip_model.get_image_features(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=SCREAMING_SNAKE_CASE ) _a : Tuple =image_embeddings_clip.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 ) return image_embeddings_clip @torch.enable_grad() def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Dict , ) -> List[str]: '''simple docstring''' _a : Optional[Any] =latents.detach().requires_grad_() _a : Optional[Any] =self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # predict the noise residual _a : Dict =self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): _a : Union[str, Any] =self.scheduler.alphas_cumprod[timestep] _a : Union[str, Any] =1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _a : Optional[Any] =(latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 _a : Optional[int] =torch.sqrt(SCREAMING_SNAKE_CASE ) _a : List[str] =pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , SCREAMING_SNAKE_CASE ): _a : List[Any] =self.scheduler.sigmas[index] _a : Union[str, Any] =latents - sigma * noise_pred else: raise ValueError(f"scheduler type {type(self.scheduler )} not supported" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _a : Union[str, Any] =1 / 0.18_215 * sample _a : Any =self.vae.decode(SCREAMING_SNAKE_CASE ).sample _a : Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 ) _a : Tuple =transforms.Resize(self.feature_extractor_size )(SCREAMING_SNAKE_CASE ) _a : Dict =self.normalize(SCREAMING_SNAKE_CASE ).to(latents.dtype ) _a : Union[str, Any] =self.clip_model.get_image_features(SCREAMING_SNAKE_CASE ) _a : Optional[int] =image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=SCREAMING_SNAKE_CASE ) _a : str =spherical_dist_loss(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).mean() * clip_guidance_scale _a : List[Any] =-torch.autograd.grad(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] if isinstance(self.scheduler , SCREAMING_SNAKE_CASE ): _a : List[Any] =latents.detach() + grads * (sigma**2) _a : Optional[Any] =noise_pred_original else: _a : Tuple =noise_pred_original - torch.sqrt(SCREAMING_SNAKE_CASE ) * grads return noise_pred, latents @torch.no_grad() def __call__( self :Any , SCREAMING_SNAKE_CASE :Union[torch.FloatTensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE :Union[torch.FloatTensor, PIL.Image.Image] , 
SCREAMING_SNAKE_CASE :Optional[str] = None , SCREAMING_SNAKE_CASE :Optional[str] = None , SCREAMING_SNAKE_CASE :Optional[int] = 5_1_2 , SCREAMING_SNAKE_CASE :Optional[int] = 5_1_2 , SCREAMING_SNAKE_CASE :float = 0.6 , SCREAMING_SNAKE_CASE :Optional[int] = 5_0 , SCREAMING_SNAKE_CASE :Optional[float] = 7.5 , SCREAMING_SNAKE_CASE :Optional[int] = 1 , SCREAMING_SNAKE_CASE :float = 0.0 , SCREAMING_SNAKE_CASE :Optional[float] = 1_0_0 , SCREAMING_SNAKE_CASE :Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE :Optional[str] = "pil" , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :float = 0.8 , SCREAMING_SNAKE_CASE :float = 0.1 , SCREAMING_SNAKE_CASE :float = 0.1 , ) -> List[Any]: '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) != batch_size: raise ValueError(f"You have passed {batch_size} batch_size, but only {len(SCREAMING_SNAKE_CASE )} generators." ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if isinstance(SCREAMING_SNAKE_CASE , torch.Generator ) and batch_size > 1: _a : List[str] =[generator] + [None] * (batch_size - 1) _a : Dict =[ ("""model""", self.coca_model is None), ("""tokenizer""", self.coca_tokenizer is None), ("""transform""", self.coca_transform is None), ] _a : Optional[int] =[x[0] for x in coca_is_none if x[1]] _a : str =""", """.join(SCREAMING_SNAKE_CASE ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(SCREAMING_SNAKE_CASE ): raise ValueError( f"Content prompt is None and CoCa [{coca_is_none_str}] is None." f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." ) _a : str =self.get_image_description(SCREAMING_SNAKE_CASE ) if style_prompt is None: if len(SCREAMING_SNAKE_CASE ): raise ValueError( f"Style prompt is None and CoCa [{coca_is_none_str}] is None." f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." 
) _a : List[str] =self.get_image_description(SCREAMING_SNAKE_CASE ) # get prompt text embeddings for content and style _a : Optional[int] =self.tokenizer( SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) _a : Dict =self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] _a : str =self.tokenizer( SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) _a : Dict =self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] _a : Union[str, Any] =slerp(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # duplicate text embeddings for each generation per prompt _a : str =text_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 ) # set timesteps _a : int ="""offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) _a : str ={} if accepts_offset: _a : str =1 self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) _a , _a : Tuple =self.get_timesteps(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.device ) _a : str =timesteps[:1].repeat(SCREAMING_SNAKE_CASE ) # Preprocess image _a : str =preprocess(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Dict =self.prepare_latents( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE ) _a : Optional[Any] =preprocess(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Any =self.prepare_latents( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE ) _a : str =slerp(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if clip_guidance_scale > 0: _a : Union[str, Any] =self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Any =self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : str =slerp( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. _a : Tuple =guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _a : Union[str, Any] =content_text_input.input_ids.shape[-1] _a : Dict =self.tokenizer([""""""] , padding="""max_length""" , max_length=SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) _a : int =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt _a : List[str] =uncond_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _a : Union[str, Any] =torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. _a : str =(batch_size, self.unet.config.in_channels, height // 8, width // 8) _a : Dict =text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps _a : Dict =torch.randn(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE ).to( self.device ) else: _a : str =torch.randn(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=SCREAMING_SNAKE_CASE ) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) _a : Dict =latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _a : Optional[int] =latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _a : str ="""eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _a : int ={} if accepts_eta: _a : Any =eta # check if the scheduler accepts generator _a : Optional[Any] ="""generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: _a : List[str] =generator with self.progress_bar(total=SCREAMING_SNAKE_CASE ): for i, t in enumerate(SCREAMING_SNAKE_CASE ): # expand the latents if we are doing classifier free guidance _a : Any =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _a : str =self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # predict the noise residual _a : List[Any] =self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE ).sample # perform classifier free guidance if do_classifier_free_guidance: _a , _a : Tuple =noise_pred.chunk(2 ) _a : str =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: _a : Tuple =( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) _a , _a : Any =self.cond_fn( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) # compute the previous noisy sample x_t -> x_t-1 _a : Any =self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _a : List[str] =1 / 0.18_215 * latents _a : List[Any] =self.vae.decode(SCREAMING_SNAKE_CASE ).sample _a : Any =(image / 2 + 0.5).clamp(0 , 1 ) _a : int =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _a : Optional[int] =self.numpy_to_pil(SCREAMING_SNAKE_CASE ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE , 
nsfw_content_detected=SCREAMING_SNAKE_CASE )
code_codestyle: 694
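The pipeline sample above repeatedly blends content and style text embeddings, noise latents, and CLIP image embeddings through calls to slerp(...), but the helper itself never appears in the row. A minimal sketch of the standard spherical linear interpolation such community pipelines typically define (names, signature, and the near-colinear fallback are my reconstruction, not dataset content):

import torch

def slerp(t: float, v0: torch.Tensor, v1: torch.Tensor, dot_threshold: float = 0.9995) -> torch.Tensor:
    # Cosine of the angle between the two inputs.
    dot = torch.sum(v0 * v1) / (torch.norm(v0) * torch.norm(v1))
    if torch.abs(dot) > dot_threshold:
        # Nearly colinear vectors: plain linear interpolation is numerically safer.
        return (1 - t) * v0 + t * v1
    theta_0 = torch.acos(dot)        # angle between v0 and v1
    theta_t = theta_0 * t            # angle of the interpolated vector
    sin_theta_0 = torch.sin(theta_0)
    s0 = torch.sin(theta_0 - theta_t) / sin_theta_0
    s1 = torch.sin(theta_t) / sin_theta_0
    return s0 * v0 + s1 * v1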
'''simple docstring''' import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : int ) -> str: # Initialise PyTorch model _a : List[str] =RemBertConfig.from_json_file(_UpperCAmelCase ) print("""Building PyTorch model from configuration: {}""".format(str(_UpperCAmelCase ) ) ) _a : Dict =RemBertModel(_UpperCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_rembert(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) # Save pytorch-model print("""Save PyTorch model to {}""".format(_UpperCAmelCase ) ) torch.save(model.state_dict() ,_UpperCAmelCase ) if __name__ == "__main__": A__: Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--rembert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained RemBERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) A__: Tuple = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
style_context_codestyle: 694
label: 1
'''simple docstring''' import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class A__ ( unittest.TestCase ): __UpperCamelCase : Any = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING __UpperCamelCase : List[Any] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> List[str]: '''simple docstring''' _a : List[str] =AudioClassificationPipeline(model=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE ) # test with a raw waveform _a : str =np.zeros((3_4_0_0_0,) ) _a : Any =np.zeros((1_4_0_0_0,) ) return audio_classifier, [audioa, audio] def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Any ) -> List[str]: '''simple docstring''' _a , _a : Union[str, Any] =examples _a : Union[str, Any] =audio_classifier(SCREAMING_SNAKE_CASE ) # by default a model is initialized with num_labels=2 self.assertEqual( SCREAMING_SNAKE_CASE , [ {"""score""": ANY(SCREAMING_SNAKE_CASE ), """label""": ANY(SCREAMING_SNAKE_CASE )}, {"""score""": ANY(SCREAMING_SNAKE_CASE ), """label""": ANY(SCREAMING_SNAKE_CASE )}, ] , ) _a : int =audio_classifier(SCREAMING_SNAKE_CASE , top_k=1 ) self.assertEqual( SCREAMING_SNAKE_CASE , [ {"""score""": ANY(SCREAMING_SNAKE_CASE ), """label""": ANY(SCREAMING_SNAKE_CASE )}, ] , ) self.run_torchaudio(SCREAMING_SNAKE_CASE ) @require_torchaudio def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :List[str] ) -> Any: '''simple docstring''' import datasets # test with a local file _a : Optional[int] =datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) _a : Tuple =dataset[0]["""audio"""]["""array"""] _a : int =audio_classifier(SCREAMING_SNAKE_CASE ) self.assertEqual( SCREAMING_SNAKE_CASE , [ {"""score""": ANY(SCREAMING_SNAKE_CASE ), """label""": ANY(SCREAMING_SNAKE_CASE )}, {"""score""": ANY(SCREAMING_SNAKE_CASE ), """label""": ANY(SCREAMING_SNAKE_CASE )}, ] , ) @require_torch def __UpperCAmelCase ( self :Any ) -> Optional[int]: '''simple docstring''' _a : Optional[int] ="""anton-l/wav2vec2-random-tiny-classifier""" _a : List[Any] =pipeline("""audio-classification""" , model=SCREAMING_SNAKE_CASE ) _a : Any =np.ones((8_0_0_0,) ) _a : List[Any] =audio_classifier(SCREAMING_SNAKE_CASE , top_k=4 ) _a : List[Any] =[ {"""score""": 0.0_842, """label""": """no"""}, {"""score""": 0.0_838, """label""": """up"""}, {"""score""": 0.0_837, """label""": """go"""}, {"""score""": 0.0_834, """label""": """right"""}, ] _a : Dict =[ {"""score""": 0.0_845, """label""": """stop"""}, {"""score""": 0.0_844, """label""": """on"""}, {"""score""": 0.0_841, """label""": """right"""}, {"""score""": 0.0_834, """label""": """left"""}, ] self.assertIn(nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) _a : List[str] ={"""array""": np.ones((8_0_0_0,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate} _a : Tuple =audio_classifier(SCREAMING_SNAKE_CASE , top_k=4 ) self.assertIn(nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def 
__UpperCAmelCase ( self :Any ) -> str: '''simple docstring''' import datasets _a : int ="""superb/wav2vec2-base-superb-ks""" _a : Optional[Any] =pipeline("""audio-classification""" , model=SCREAMING_SNAKE_CASE ) _a : List[Any] =datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" ) _a : Any =np.array(dataset[3]["""speech"""] , dtype=np.floataa ) _a : int =audio_classifier(SCREAMING_SNAKE_CASE , top_k=4 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=3 ) , [ {"""score""": 0.981, """label""": """go"""}, {"""score""": 0.007, """label""": """up"""}, {"""score""": 0.006, """label""": """_unknown_"""}, {"""score""": 0.001, """label""": """down"""}, ] , ) @require_tf @unittest.skip("""Audio classification is not implemented for TF""" ) def __UpperCAmelCase ( self :Tuple ) -> Tuple: '''simple docstring''' pass
code_codestyle: 694
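The tests above exercise transformers' audio-classification pipeline on raw numpy waveforms. Plain usage looks like this (the checkpoint is the tiny random classifier named in the row itself; assumes torch and network access):

import numpy as np
from transformers import pipeline

classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
waveform = np.ones((8_000,), dtype=np.float32)  # same dummy input the test feeds in
print(classifier(waveform, top_k=4))  # list of {"score": ..., "label": ...} dicts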
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A__: Optional[int] = logging.get_logger(__name__) A__: Union[str, Any] = '''▁''' A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''} A__: Optional[int] = { '''vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''', }, '''monolingual_vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''', }, } A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024} class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = VOCAB_FILES_NAMES __UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it _a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token _a : int ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) _a : Dict =vocab_file _a : int =monolingual_vocab_file _a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _a : List[Any] ={} _a : List[str] =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[Any] =cnt cnt += 1 with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _a : int =line.strip().split()[0] _a : str =len(self.fairseq_tokens_to_ids ) if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[int] =len(self.fairseq_tokens_to_ids ) _a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self :int ) -> List[Any]: '''simple docstring''' _a : Optional[int] =self.__dict__.copy() _a : Optional[Any] =None _a : str =self.sp_model.serialized_model_proto() return state def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str: '''simple docstring''' _a : List[str] =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple ={} _a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def 
__UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _a : Optional[int] =[self.cls_token_id] _a : int =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _a : List[str] =[self.sep_token_id] _a : int =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __UpperCAmelCase ( self :Optional[int] ) -> int: '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def __UpperCAmelCase ( self :int ) -> Union[str, Any]: '''simple docstring''' _a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict: '''simple docstring''' return self.fairseq_ids_to_tokens[index] def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip() return out_string def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _a : int =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _a : Any =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi: _a : Optional[Any] =self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE ) 
elif not os.path.isfile(self.monolingual_vocab_file ): with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" ) return out_vocab_file, out_monolingual_vocab_file
style_context_codestyle: 694
label: 1
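This row's style_context is the BartPho syllable tokenizer, which wraps a SentencePiece model plus a reduced fairseq vocabulary. A hedged usage sketch ("vinai/bartpho-syllable" is the checkpoint listed in the sample's own vocab map; requires sentencepiece and network access, and the example sentence is mine):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
encoded = tokenizer("Chúng tôi là những nghiên cứu viên.")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))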
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available A__: Dict = { '''configuration_audio_spectrogram_transformer''': [ '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ASTConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: int = [ '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ASTForAudioClassification''', '''ASTModel''', '''ASTPreTrainedModel''', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: List[Any] = ['''ASTFeatureExtractor'''] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys A__: List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
code_codestyle: 694
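The sample above builds transformers' usual _import_structure / _LazyModule arrangement, which defers heavy imports (torch, the speech extras) until a symbol is actually touched. A stripped-down stand-in for the same idea using PEP 562's module-level __getattr__ (an illustration only, not transformers' real _LazyModule code):

import importlib

_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):
    # Resolve the symbol lazily on first attribute access.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")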
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
style_context_codestyle: 694
label: 1
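Among the many helpers this row's style_context re-exports is find_executable_batch_size (from .memory). A short sketch of how it is normally applied (the training body here is a placeholder):

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # Retried automatically with a smaller batch size on out-of-memory errors.
    print(f"trying batch_size={batch_size}")

train()  # note: callers do not pass batch_size themselves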
'''simple docstring''' import json import os import torch from diffusers import UNetaDModel os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True) os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True) os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ) -> Union[str, Any]: if hor == 128: _a : List[Any] =("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") _a : Optional[Any] =(32, 128, 256) _a : str =("""UpResnetBlock1D""", """UpResnetBlock1D""") elif hor == 32: _a : Union[str, Any] =("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") _a : Optional[int] =(32, 64, 128, 256) _a : List[Any] =("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""") _a : Any =torch.load(F"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" ) _a : Optional[int] =model.state_dict() _a : Any ={ """down_block_types""": down_block_types, """block_out_channels""": block_out_channels, """up_block_types""": up_block_types, """layers_per_block""": 1, """use_timestep_embedding""": True, """out_block_type""": """OutConv1DBlock""", """norm_num_groups""": 8, """downsample_each_block""": False, """in_channels""": 14, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """flip_sin_to_cos""": False, """freq_shift""": 1, """sample_size""": 65536, """mid_block_type""": """MidResTemporalBlock1D""", """act_fn""": """mish""", } _a : Optional[Any] =UNetaDModel(**_UpperCAmelCase ) print(F"length of state dict: {len(state_dict.keys() )}" ) print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" ) _a : Optional[int] =dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): _a : Optional[int] =state_dict.pop(_UpperCAmelCase ) hf_value_function.load_state_dict(_UpperCAmelCase ) torch.save(hf_value_function.state_dict() ,F"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" ) with open(F"hub/hopper-medium-v2/unet/hor{hor}/config.json" ,"""w""" ) as f: json.dump(_UpperCAmelCase ,_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( ) -> str: _a : Tuple ={ """in_channels""": 14, """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""), """up_block_types""": (), """out_block_type""": """ValueFunction""", """mid_block_type""": """ValueFunctionMidBlock1D""", """block_out_channels""": (32, 64, 128, 256), """layers_per_block""": 1, """downsample_each_block""": True, """sample_size""": 65536, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """use_timestep_embedding""": True, """flip_sin_to_cos""": False, """freq_shift""": 1, """norm_num_groups""": 8, """act_fn""": """mish""", } _a : List[Any] =torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" ) _a : Any =model _a : Union[str, Any] =UNetaDModel(**_UpperCAmelCase ) print(F"length of state dict: {len(state_dict.keys() )}" ) print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" ) _a : Dict =dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): _a : List[str] =state_dict.pop(_UpperCAmelCase ) hf_value_function.load_state_dict(_UpperCAmelCase ) torch.save(hf_value_function.state_dict() 
,"""hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" ) with open("""hub/hopper-medium-v2/value_function/config.json""" ,"""w""" ) as f: json.dump(_UpperCAmelCase ,_UpperCAmelCase ) if __name__ == "__main__": unet(32) # unet(128) value_function()
code_codestyle: 694
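Note that the converter above pairs old and new checkpoint keys purely by iteration order, via dict(zip(state_dict.keys(), hf_value_function.state_dict().keys())); if the two orderings ever diverge, weights get scrambled silently. A cheap guard one could bolt on (my addition, not part of the script) is a per-pair shape check:

def check_zip_mapping(src_state_dict, dst_state_dict):
    # Works on any mapping of tensors/arrays that expose .shape.
    for src_key, dst_key in zip(src_state_dict, dst_state_dict):
        if src_state_dict[src_key].shape != dst_state_dict[dst_key].shape:
            raise ValueError(f"{src_key} -> {dst_key}: shape mismatch")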
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list[list]: _a : Dict =current_set.copy() for row_index, row in enumerate(_UpperCAmelCase ): _a : Any =row[0] for column_index, column in enumerate(_UpperCAmelCase ): if magnitude == 0: _a : Any =column continue _a : Union[str, Any] =column / magnitude # Subtract to cancel term _a : Optional[Any] =current_set[0] _a : List[Any] =[first_row] _a : Tuple =current_set[1::] for row in current_set: _a : Any =[] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(_UpperCAmelCase ) continue for column_index in range(len(_UpperCAmelCase ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(_UpperCAmelCase ) # Create next recursion iteration set if len(final_set[0] ) != 3: _a : List[str] =final_set[0] _a : Tuple =[] _a : Tuple =[] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _a : str =simplify(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): resultant[i].insert(0 ,current_first_column[i] ) resultant.insert(0 ,_UpperCAmelCase ) _a : List[Any] =resultant return final_set def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list: if len(_UpperCAmelCase ) == 0: raise IndexError("""solve_simultaneous() requires n lists of length n+1""" ) _a : str =len(_UpperCAmelCase ) + 1 if any(len(_UpperCAmelCase ) != _length for item in equations ): raise IndexError("""solve_simultaneous() requires n lists of length n+1""" ) for row in equations: if any(not isinstance(_UpperCAmelCase ,(int, float) ) for column in row ): raise ValueError("""solve_simultaneous() requires lists of integers""" ) if len(_UpperCAmelCase ) == 1: return [equations[0][-1] / equations[0][0]] _a : str =equations.copy() if any(0 in row for row in data_set ): _a : Optional[int] =data_set.copy() _a : str =[] for row_index, row in enumerate(_UpperCAmelCase ): if 0 not in row: _a : List[Any] =data_set.pop(_UpperCAmelCase ) break if not full_row: raise ValueError("""solve_simultaneous() requires at least 1 full equation""" ) data_set.insert(0 ,_UpperCAmelCase ) _a : Dict =data_set.copy() _a : Any =simplify(_UpperCAmelCase ) _a : Any =simplified[::-1] _a : list =[] for row in simplified: _a : Optional[Any] =row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _a : Any =row.copy()[: len(_UpperCAmelCase ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(_UpperCAmelCase ) == 0: solutions.append(0 ) continue _a : List[str] =temp_row[1::] _a : int =temp_row[::-1] for column_index, column in enumerate(_UpperCAmelCase ): current_solution -= column * solutions[column_index] solutions.append(_UpperCAmelCase ) _a : Tuple =[] for item in solutions: final.append(float(round(_UpperCAmelCase ,5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() A__: int = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
style_context_codestyle: 694
label: 1
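The solver in this row's style_context reduces the system by repeated elimination and back-substitution; its answer on the row's own 5x5 example can be cross-checked against numpy:

import numpy as np

# Each inner list is [a1, ..., a5, b] for a1*x1 + ... + a5*x5 = b.
eq = [
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]
A = np.array([row[:-1] for row in eq], dtype=float)
b = np.array([row[-1] for row in eq], dtype=float)
print(np.linalg.solve(A, b))  # should agree with solve_simultaneous(eq)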
'''simple docstring''' A__: List[str] = 6_5521 def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> int: _a : List[Any] =1 _a : str =0 for plain_chr in plain_text: _a : Dict =(a + ord(_UpperCAmelCase )) % MOD_ADLER _a : Tuple =(b + a) % MOD_ADLER return (b << 16) | a
code_codestyle: 694
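The code cell above is an obfuscated Adler-32 checksum in which the renamed assignment targets (_a) no longer match the names the body reads (a, b, MOD_ADLER, plain_text). A de-obfuscated reading with a zlib cross-check (identifier names are my reconstruction; the ord()-based update matches zlib for ASCII input):

import zlib

MOD_ADLER = 65_521  # largest prime below 2**16

def adler32(plain_text: str) -> int:
    a, b = 1, 0
    for ch in plain_text:
        a = (a + ord(ch)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a

assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia")  # 0x11E60398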
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__: Dict = logging.get_logger(__name__) A__: Optional[int] = { '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = "markuplm" def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any: '''simple docstring''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) _a : Any =vocab_size _a : List[str] =hidden_size _a : List[str] =num_hidden_layers _a : Tuple =num_attention_heads _a : Union[str, Any] =hidden_act _a : Tuple =intermediate_size _a : Optional[Any] =hidden_dropout_prob _a : int =attention_probs_dropout_prob _a : Any =max_position_embeddings _a : List[Any] =type_vocab_size _a : List[Any] =initializer_range _a : List[Any] =layer_norm_eps _a : Optional[int] =position_embedding_type _a : List[Any] =use_cache _a : List[str] =classifier_dropout # additional properties _a : int =max_depth _a : Union[str, Any] =max_xpath_tag_unit_embeddings _a : str =max_xpath_subs_unit_embeddings _a : int =tag_pad_id _a : List[Any] =subs_pad_id _a : str =xpath_unit_hidden_size
style_context_codestyle: 694
label: 1
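This row's style_context is an obfuscated copy of transformers' MarkupLM configuration; its only additions over a BERT-style config are the markup-specific knobs assigned at the end (max_depth, xpath tag/subscript embedding sizes, tag and subs pad ids, xpath_unit_hidden_size). Assuming the canonical class, instantiation looks like:

from transformers import MarkupLMConfig

config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
print(config.model_type)  # "markuplm"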
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A__: List[str] = { '''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: Optional[int] = ['''VivitImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: Dict = [ '''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''VivitModel''', '''VivitPreTrainedModel''', '''VivitForVideoClassification''', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys A__: Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
code_codestyle: 694
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''') def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict: hf_model.apply_weight_norm() _a : Any =checkpoint["""input_conv.weight_g"""] _a : Union[str, Any] =checkpoint["""input_conv.weight_v"""] _a : Optional[int] =checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): _a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"] _a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"] _a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): _a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"] _a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"] _a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"] _a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"] _a : Dict =checkpoint["""output_conv.1.weight_g"""] _a : str =checkpoint["""output_conv.1.weight_v"""] _a : Union[str, Any] =checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]: if config_path is not None: _a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase ) else: _a : str =SpeechTaHifiGanConfig() _a : Tuple =SpeechTaHifiGan(_UpperCAmelCase ) _a : int =torch.load(_UpperCAmelCase ) load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase ) _a : Dict =np.load(_UpperCAmelCase ) _a : Union[str, Any] =stats[0].reshape(-1 ) _a : Any =stats[1].reshape(-1 ) _a : Tuple =torch.from_numpy(_UpperCAmelCase ).float() _a : List[str] =torch.from_numpy(_UpperCAmelCase ).float() model.save_pretrained(_UpperCAmelCase ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(_UpperCAmelCase ) if __name__ == "__main__": A__: Optional[int] = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) A__: Tuple = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
style_context_codestyle: 694
label: 1
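The HiFi-GAN converter in this row's style_context copies *.weight_g and *.weight_v tensors and finishes with remove_weight_norm(); those parameter names come from PyTorch weight normalization, which factors a weight as g * v / ||v||. A minimal illustration:

import torch

conv = torch.nn.utils.weight_norm(torch.nn.Conv1d(8, 8, kernel_size=3))
print(sorted(name for name, _ in conv.named_parameters()))
# ['bias', 'weight_g', 'weight_v']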
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class A__ ( unittest.TestCase ): def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any] ) -> Union[str, Any]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): _a : Union[str, Any] =model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Any ) -> List[Any]: '''simple docstring''' _a : int ="""sshleifer/tiny-gpt2""" _a : Tuple =TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE , inference=SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=SCREAMING_SNAKE_CASE , multi_process=SCREAMING_SNAKE_CASE , ) _a : Optional[Any] =TensorFlowBenchmark(SCREAMING_SNAKE_CASE ) _a : List[str] =benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]: '''simple docstring''' _a : List[Any] ="""sgugger/tiny-distilbert-classification""" _a : Optional[int] =TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE , inference=SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE , only_pretrain_model=SCREAMING_SNAKE_CASE , ) _a : Optional[Any] =TensorFlowBenchmark(SCREAMING_SNAKE_CASE ) _a : Optional[Any] =benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCAmelCase ( self :Tuple ) -> int: '''simple docstring''' _a : List[str] ="""sshleifer/tiny-gpt2""" _a : Union[str, Any] =TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE , inference=SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE , ) _a : Optional[Any] =TensorFlowBenchmark(SCREAMING_SNAKE_CASE ) _a : List[Any] =benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCAmelCase ( self :str ) -> Optional[Any]: '''simple docstring''' _a : Union[str, Any] ="""sshleifer/tiny-gpt2""" _a : Union[str, Any] =AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) _a : List[str] =TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE , inference=SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=SCREAMING_SNAKE_CASE , multi_process=SCREAMING_SNAKE_CASE , ) _a : List[Any] =TensorFlowBenchmark(SCREAMING_SNAKE_CASE , [config] ) _a : int =benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCAmelCase ( self :List[str] ) -> Union[str, Any]: '''simple docstring''' _a : List[str] ="""sshleifer/tiny-gpt2""" _a : Union[str, Any] =AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) _a : Any =TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE , inference=SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , 
multi_process=SCREAMING_SNAKE_CASE , ) _a : Optional[int] =TensorFlowBenchmark(SCREAMING_SNAKE_CASE , [config] ) _a : int =benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCAmelCase ( self :List[str] ) -> List[str]: '''simple docstring''' _a : int ="""sshleifer/tiny-gpt2""" _a : Any =TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE , inference=SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE , ) _a : List[str] =TensorFlowBenchmark(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __UpperCAmelCase ( self :Any ) -> List[Any]: '''simple docstring''' _a : List[str] ="""sshleifer/tiny-gpt2""" _a : Tuple =AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) _a : Dict =TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE , inference=SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE , ) _a : Dict =TensorFlowBenchmark(SCREAMING_SNAKE_CASE , [config] ) _a : str =benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __UpperCAmelCase ( self :Dict ) -> Tuple: '''simple docstring''' _a : str ="""patrickvonplaten/t5-tiny-random""" _a : List[Any] =AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) _a : List[str] =TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE , inference=SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE , ) _a : List[Any] =TensorFlowBenchmark(SCREAMING_SNAKE_CASE , configs=[config] ) _a : str =benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" ) def __UpperCAmelCase ( self :Dict ) -> Any: '''simple docstring''' _a : Union[str, Any] ="""sshleifer/tiny-gpt2""" _a : Optional[int] =TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE , inference=SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , use_xla=SCREAMING_SNAKE_CASE , multi_process=SCREAMING_SNAKE_CASE , ) _a : int =TensorFlowBenchmark(SCREAMING_SNAKE_CASE ) _a : Tuple =benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCAmelCase ( self :List[str] ) -> Tuple: '''simple docstring''' _a : Dict ="""sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: _a : str =TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=SCREAMING_SNAKE_CASE , save_to_csv=SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(SCREAMING_SNAKE_CASE , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(SCREAMING_SNAKE_CASE , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(SCREAMING_SNAKE_CASE , """env.csv""" ) , multi_process=SCREAMING_SNAKE_CASE , ) _a : Optional[Any] =TensorFlowBenchmark(SCREAMING_SNAKE_CASE ) benchmark.run() self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE , """inf_time.csv""" ) ).exists() ) 
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE , """env.csv""" ) ).exists() ) def __UpperCAmelCase ( self :Dict ) -> Optional[int]: '''simple docstring''' _a : Dict ="""sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(SCREAMING_SNAKE_CASE :List[str] ): self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """sequential""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """cumulative""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """current""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: _a : str =TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(SCREAMING_SNAKE_CASE , """log.txt""" ) , log_print=SCREAMING_SNAKE_CASE , trace_memory_line_by_line=SCREAMING_SNAKE_CASE , eager_mode=SCREAMING_SNAKE_CASE , multi_process=SCREAMING_SNAKE_CASE , ) _a : Optional[int] =TensorFlowBenchmark(SCREAMING_SNAKE_CASE ) _a : Any =benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE , """log.txt""" ) ).exists() )
code_codestyle: 694
'''simple docstring''' class A__ : def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]: '''simple docstring''' _a : List[str] =None _a : Optional[Any] =None _a : str =graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Optional[int] =len(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =None def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any: '''simple docstring''' if sources is int: _a : Tuple =[sources] if sinks is int: _a : Optional[int] =[sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return _a : Union[str, Any] =sources[0] _a : Tuple =sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: _a : Tuple =0 for i in sources: max_input_flow += sum(self.graph[i] ) _a : List[Any] =len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _a : Any =max_input_flow _a : List[str] =0 _a : List[str] =len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _a : str =max_input_flow _a : Optional[Any] =size - 1 def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int: '''simple docstring''' _a : Tuple =algorithm(self ) class A__ : def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict: '''simple docstring''' _a : List[str] =flow_network _a : List[Any] =flow_network.verticesCount _a : str =flow_network.sourceIndex _a : str =flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _a : List[Any] =flow_network.graph _a : Optional[int] =False def __UpperCAmelCase ( self :List[Any] ) -> List[str]: '''simple docstring''' if not self.executed: self._algorithm() _a : Any =True def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]: '''simple docstring''' pass class A__ ( UpperCAmelCase__ ): def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result _a : List[Any] =-1 def __UpperCAmelCase ( self :Dict ) -> Tuple: '''simple docstring''' if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class A__ ( UpperCAmelCase__ ): def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) _a : int =[[0] * self.verticies_count for i in range(self.verticies_count )] _a : Union[str, Any] =[0] * self.verticies_count _a : Optional[Any] =[0] * self.verticies_count def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' _a : int =self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += 
bandwidth # Relabel-to-front selection rule _a : Tuple =[ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _a : List[Any] =0 while i < len(SCREAMING_SNAKE_CASE ): _a : Any =vertices_list[i] _a : str =self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) _a : List[str] =0 else: i += 1 _a : Optional[int] =sum(self.preflow[self.source_index] ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' _a : List[str] =min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]: '''simple docstring''' _a : int =None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _a : Optional[Any] =self.heights[to_index] if min_height is not None: _a : Any =min_height + 1 if __name__ == "__main__": A__: str = [0] A__: Optional[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network A__: Union[str, Any] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate A__: List[str] = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
style_context_codestyle: 694
label: 1
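This row's style_context implements push-relabel max flow with the relabel-to-front selection rule and prints the flow for a 4-node graph. Its answer can be checked independently (assumes networkx is installed; the capacity matrix is taken from the row itself):

import networkx as nx

graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
g = nx.DiGraph()
for u, row in enumerate(graph):
    for v, cap in enumerate(row):
        if cap:
            g.add_edge(u, v, capacity=cap)
flow_value, _ = nx.maximum_flow(g, 0, 3)
print(flow_value)  # 6: the single path 0 -> 1 -> 2 -> 3 is capped by edge 1 -> 2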
'''simple docstring''' import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ ( unittest.TestCase ): def __init__( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Tuple=3 , SCREAMING_SNAKE_CASE :Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE :int=3 , SCREAMING_SNAKE_CASE :Optional[Any]=1_0 , SCREAMING_SNAKE_CASE :List[Any]=[1_0, 2_0, 3_0, 4_0] , SCREAMING_SNAKE_CASE :Optional[int]=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Optional[int]="relu" , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :List[str]=None , ) -> Optional[Any]: '''simple docstring''' _a : str =parent _a : Tuple =batch_size _a : Optional[Any] =image_size _a : List[str] =num_channels _a : str =embeddings_size _a : Any =hidden_sizes _a : Union[str, Any] =depths _a : Any =is_training _a : Dict =use_labels _a : Tuple =hidden_act _a : str =num_labels _a : Dict =scope _a : List[Any] =len(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> str: '''simple docstring''' _a : int =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a : List[Any] =self.get_config() return config, pixel_values def __UpperCAmelCase ( self :Dict ) -> int: '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :int ) -> str: '''simple docstring''' _a : Optional[Any] =FlaxRegNetModel(config=SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =model(SCREAMING_SNAKE_CASE ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :List[str] ) -> Optional[Any]: '''simple docstring''' _a : Optional[Any] =self.num_labels _a : str =FlaxRegNetForImageClassification(config=SCREAMING_SNAKE_CASE ) _a : Dict =model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self :List[str] ) -> Dict: '''simple docstring''' _a : Tuple =self.prepare_config_and_inputs() _a , _a : List[str] =config_and_inputs _a : int ={"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : List[str] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () __UpperCamelCase : int = False __UpperCamelCase : Any = False __UpperCamelCase : Optional[int] = False def __UpperCAmelCase ( self :Optional[Any] ) -> None: '''simple docstring''' _a : List[Any] =FlaxRegNetModelTester(self ) _a : List[str] =ConfigTester(self , 
config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Any ) -> List[str]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCAmelCase ( self :Optional[int] ) -> int: '''simple docstring''' return def __UpperCAmelCase ( self :Optional[int] ) -> List[str]: '''simple docstring''' _a : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Union[str, Any] ) -> str: '''simple docstring''' _a : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def __UpperCAmelCase ( self :str ) -> str: '''simple docstring''' pass @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def __UpperCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' pass def __UpperCAmelCase ( self :Dict ) -> str: '''simple docstring''' _a , _a : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : int =model_class(SCREAMING_SNAKE_CASE ) _a : Tuple =inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a : Any =[*signature.parameters.keys()] _a : str =["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int ) -> Tuple: '''simple docstring''' def check_hidden_states_output(SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ): _a : List[str] =model_class(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) _a : Tuple =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _a : Union[str, Any] =self.model_tester.num_stages self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) _a , _a : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : Tuple =True check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _a : Union[str, Any] =True check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' _a , _a : Any =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a : Optional[int] =self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =model_class(SCREAMING_SNAKE_CASE ) @jax.jit def model_jitted(SCREAMING_SNAKE_CASE :Dict , **SCREAMING_SNAKE_CASE :str ): return model(pixel_values=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) with self.subTest("""JIT Enabled""" ): _a : Any =model_jitted(**SCREAMING_SNAKE_CASE ).to_tuple() with 
self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _a : int =model_jitted(**SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): self.assertEqual(jitted_output.shape , output.shape ) def SCREAMING_SNAKE_CASE_ ( ) -> Tuple: _a : Tuple =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_flax class A__ ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self :Tuple ) -> str: '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None @slow def __UpperCAmelCase ( self :List[str] ) -> str: '''simple docstring''' _a : str =FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" ) _a : int =self.default_image_processor _a : Dict =prepare_img() _a : List[Any] =image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="""np""" ) _a : List[Any] =model(**SCREAMING_SNAKE_CASE ) # verify the logits _a : Optional[Any] =(1, 1_0_0_0) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) _a : int =jnp.array([-0.4_180, -1.5_051, -3.4_836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
'''simple docstring'''

INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
# Template placeholders used when processing documentation code samples.
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
'''simple docstring''' import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class A__ : def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=7 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :Optional[int]=False , SCREAMING_SNAKE_CASE :List[Any]=True , SCREAMING_SNAKE_CASE :List[str]=9_9 , SCREAMING_SNAKE_CASE :Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5 , SCREAMING_SNAKE_CASE :Union[str, Any]=4 , SCREAMING_SNAKE_CASE :Optional[int]=3_7 , SCREAMING_SNAKE_CASE :List[str]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[str]=1_6 , SCREAMING_SNAKE_CASE :str=2 , SCREAMING_SNAKE_CASE :int=0.02 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :int=4 , SCREAMING_SNAKE_CASE :Any=None , ) -> Any: '''simple docstring''' _a : Union[str, Any] =parent _a : Optional[Any] =batch_size _a : Optional[int] =seq_length _a : Dict =is_training _a : Optional[int] =use_input_mask _a : List[Any] =use_token_type_ids _a : Dict =use_labels _a : Optional[Any] =vocab_size _a : Union[str, Any] =hidden_size _a : Optional[int] =num_hidden_layers _a : int =num_attention_heads _a : Optional[Any] =intermediate_size _a : Tuple =hidden_act _a : Any =hidden_dropout_prob _a : int =attention_probs_dropout_prob _a : Dict =max_position_embeddings _a : int =type_vocab_size _a : str =type_sequence_label_size _a : List[str] =initializer_range _a : List[Any] =num_labels _a : List[Any] =num_choices _a : Union[str, Any] =scope def __UpperCAmelCase ( self :str ) -> Tuple: '''simple docstring''' _a : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : List[str] =None if self.use_input_mask: _a : Tuple =random_attention_mask([self.batch_size, self.seq_length] ) _a : List[str] =None _a : List[str] =None _a : Dict =None _a : int =None if self.use_labels: _a : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _a : Dict =ids_tensor([self.batch_size] , self.num_choices ) _a : Dict =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self :int ) -> Union[str, Any]: '''simple docstring''' return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , pad_token_id=1 
, new_decoder_architecture=SCREAMING_SNAKE_CASE , ) def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Any: '''simple docstring''' _a : str =FalconModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : Dict =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) _a : str =model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Any , ) -> List[Any]: '''simple docstring''' _a : Optional[Any] =True _a : List[str] =FalconModel(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : List[str] =model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , ) _a : Optional[int] =model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , ) _a : Union[str, Any] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any] , ) -> List[Any]: '''simple docstring''' _a : int =FalconForCausalLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : int =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Optional[Any] , ) -> Dict: '''simple docstring''' _a : Dict =True _a : Any =True _a : Tuple =FalconForCausalLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() # first forward pass _a : int =model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , ) _a : Any =outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _a : List[str] =ids_tensor((self.batch_size, 3) , config.vocab_size ) _a : Dict =ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _a : int =torch.cat([input_ids, next_tokens] , dim=-1 ) _a : Optional[int] =torch.cat([input_mask, next_mask] , dim=-1 ) _a : Union[str, Any] =model( 
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )["""hidden_states"""][0] _a : str =model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )["""hidden_states"""][0] # select random slice _a : Dict =ids_tensor((1,) , output_from_past.shape[-1] ).item() _a : List[str] =output_from_no_past[:, -3:, random_slice_idx].detach() _a : Union[str, Any] =output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' _a : Optional[Any] =self.prepare_config_and_inputs() ( ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ) : int =config_and_inputs _a : Dict ={"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class A__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : Optional[Any] = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) __UpperCamelCase : Union[str, Any] = (FalconForCausalLM,) if is_torch_available() else () __UpperCamelCase : List[str] = ( { "feature-extraction": FalconModel, "text-classification": FalconForSequenceClassification, "text-generation": FalconForCausalLM, "question-answering": FalconForQuestionAnswering, "token-classification": FalconForTokenClassification, "zero-shot": FalconForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase : Union[str, Any] = False __UpperCamelCase : Dict = False def __UpperCAmelCase ( self :Optional[Any] ) -> str: '''simple docstring''' _a : str =FalconModelTester(self ) _a : str =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=3_7 ) def __UpperCAmelCase ( self :int ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _a : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> Union[str, Any]: '''simple docstring''' _a , *_a : Optional[Any] =self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: _a : int =alibi self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Dict ) -> int: '''simple docstring''' _a , _a : Tuple =self.model_tester.prepare_config_and_inputs_for_common() _a : int =3 _a : List[Any] =input_dict["""input_ids"""] _a : Any =input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _a : str =FalconForSequenceClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : str =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, 
self.model_tester.num_labels) ) def __UpperCAmelCase ( self :Optional[Any] ) -> Union[str, Any]: '''simple docstring''' _a , _a : Tuple =self.model_tester.prepare_config_and_inputs_for_common() _a : str =3 _a : List[str] ="""single_label_classification""" _a : List[str] =input_dict["""input_ids"""] _a : int =input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE ) _a : Optional[int] =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _a : Any =FalconForSequenceClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : Optional[Any] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCAmelCase ( self :Optional[Any] ) -> List[Any]: '''simple docstring''' _a , _a : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common() _a : Optional[int] =input_dict["""input_ids"""] _a : Tuple =FalconForCausalLM(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : Tuple =model(SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE ) _a : Any =input_ids.shape[0] _a : Optional[Any] =model._convert_to_rw_cache(result.past_key_values ) _a : Tuple =model._convert_cache_to_standard_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for layer in range(len(SCREAMING_SNAKE_CASE ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def __UpperCAmelCase ( self :int ) -> Optional[Any]: '''simple docstring''' _a , _a : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() _a : Union[str, Any] =3 _a : List[Any] ="""multi_label_classification""" _a : str =input_dict["""input_ids"""] _a : Optional[Any] =input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE ) _a : Optional[int] =ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) _a : Optional[Any] =FalconForSequenceClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() _a : Tuple =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCAmelCase ( self :int ) -> Dict: '''simple docstring''' # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: _a , _a : int =self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(SCREAMING_SNAKE_CASE , """use_cache""" ): return _a : List[str] =model_class(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) if "use_cache" not in inputs: _a : int =True _a : List[Any] =model(**SCREAMING_SNAKE_CASE ) # If "past_key_values" is not returned, pass the test (e.g. 
RWKV uses a different cache name and format) if "past_key_values" not in outputs: return _a : Dict =( getattr(SCREAMING_SNAKE_CASE , """decoder_layers""" , SCREAMING_SNAKE_CASE ) or getattr(SCREAMING_SNAKE_CASE , """num_decoder_layers""" , SCREAMING_SNAKE_CASE ) or config.num_hidden_layers ) _a : Tuple =getattr(SCREAMING_SNAKE_CASE , """num_kv_heads""" , config.num_attention_heads ) _a : List[Any] =getattr(SCREAMING_SNAKE_CASE , """d_model""" , config.hidden_size ) _a : Dict =embed_dim // num_attention_heads _a : Any =outputs["""past_key_values"""] self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) _a , _a : Optional[Any] =inputs["""input_ids"""].shape for i in range(SCREAMING_SNAKE_CASE ): if config.new_decoder_architecture: _a : List[Any] =config.num_attention_heads elif config.multi_query: _a : int =1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Optional[int] ) -> List[str]: '''simple docstring''' _a : List[Any] =AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" ) _a : List[str] =FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" ) model.eval() model.to(SCREAMING_SNAKE_CASE ) _a : Any =tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE ) _a : Optional[Any] =( """My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.""" ) _a : str =model.generate(**SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , max_new_tokens=1_9 ) _a : Any =tokenizer.batch_decode(SCREAMING_SNAKE_CASE )[0] self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __UpperCAmelCase ( self :Optional[Any] ) -> List[str]: '''simple docstring''' # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: _a : Optional[Any] =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) _a : int =FalconForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE ) model.eval() model.to(SCREAMING_SNAKE_CASE ) _a : Optional[int] =tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , max_new_tokens=4 ) model.generate(**SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , max_new_tokens=4 ) model.generate(**SCREAMING_SNAKE_CASE , num_beams=2 , max_new_tokens=4 ) @slow def __UpperCAmelCase ( self :List[str] ) -> int: '''simple docstring''' # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: _a : Optional[Any] =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) _a : Optional[int] =FalconForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE ) model.eval() model.to(device=SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =tokenizer("""My 
favorite food is""" , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE ) # Test results are the same with and without cache _a : List[Any] =model.generate(**SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , max_new_tokens=2_0 , use_cache=SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =model.generate(**SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , max_new_tokens=2_0 , use_cache=SCREAMING_SNAKE_CASE ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
'''simple docstring'''

def price_plus_tax(price: float, tax_rate: float) -> float:
    '''Return the price with the given tax rate applied.'''
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
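# Worked example (added for illustration, not part of the original file):
# a 25% tax rate on a price of 100 gives 100 * (1 + 0.25) = 125.0, and a
# 5% rate on 125.50 gives 125.50 * 1.05 = 131.775 (the printed value may
# differ slightly due to floating-point rounding).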
'''simple docstring'''

from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module) -> None:
    '''Disable gradient updates for every parameter of the given module.'''
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    '''Pick the best available torch device, warning about known MPS issues.'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(image) -> None:
    '''Display an image with matplotlib, hiding both axes.'''
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    '''Return the current time formatted as HH:MM:SS.'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
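# Usage sketch (assumed, not part of the original file): freeze a pretrained
# backbone and move it to the best available device. The torchvision import
# is only illustrative; freeze_module works on any torch.nn.Module.
#
#     from torchvision.models import resnet18
#     model = resnet18()
#     freeze_module(model)            # parameters stop receiving gradients
#     model = model.to(get_device())
#     print(get_timestamp())          # e.g. "14:03:27"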
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class A__ ( unittest.TestCase ): def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple: '''simple docstring''' _a : Optional[Any] =parent _a : List[str] =batch_size _a : List[str] =seq_length _a : List[Any] =is_training _a : Optional[int] =use_attention_mask _a : List[Any] =use_token_type_ids _a : List[Any] =use_labels _a : Optional[Any] =vocab_size _a : str =hidden_size _a : List[Any] =num_hidden_layers _a : List[Any] =num_attention_heads _a : Union[str, Any] =intermediate_size _a : int =hidden_act _a : List[str] =hidden_dropout_prob _a : Optional[int] =attention_probs_dropout_prob _a : Dict =max_position_embeddings _a : Any =type_vocab_size _a : str =type_sequence_label_size _a : str =initializer_range _a : List[str] =num_choices def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict: '''simple docstring''' _a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Dict =None if self.use_attention_mask: _a : Any =random_attention_mask([self.batch_size, self.seq_length] ) _a : Optional[int] =None if self.use_token_type_ids: _a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a : Union[str, Any] =RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' _a : Tuple =self.prepare_config_and_inputs() _a , _a , _a , _a : List[Any] =config_and_inputs _a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def 
__UpperCAmelCase ( self :int ) -> str: '''simple docstring''' _a : List[Any] =self.prepare_config_and_inputs() _a , _a , _a , _a : Optional[int] =config_and_inputs _a : Tuple =True _a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : Union[str, Any] = True __UpperCamelCase : Dict = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self :List[str] ) -> Optional[int]: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self ) @slow def __UpperCAmelCase ( self :str ) -> int: '''simple docstring''' for model_class_name in self.all_model_classes: _a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Dict =model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_flax class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Any ) -> str: '''simple docstring''' _a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Dict =model(SCREAMING_SNAKE_CASE )[0] _a : List[Any] =[1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. _a : Any =np.array( [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def __UpperCAmelCase ( self :int ) -> int: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. _a : str =np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A__: List[Any] = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) A__: Optional[Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight")) rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias")) rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight")) rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias")) rename_keys.append( (F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( F"transformer.decoder.layers.{i}.cross_attn.out_proj.weight", F"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( F"transformer.decoder.layers.{i}.cross_attn.out_proj.bias", F"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight")) rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias")) rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight")) rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias")) rename_keys.append( (F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append( (F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", 
F"decoder.layers.{i}.final_layer_norm.bias")) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", F"decoder.layers.{i}.sa_qcontent_proj.weight") ) rename_keys.append( (F"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", F"decoder.layers.{i}.sa_kcontent_proj.weight") ) rename_keys.append( (F"transformer.decoder.layers.{i}.sa_qpos_proj.weight", F"decoder.layers.{i}.sa_qpos_proj.weight") ) rename_keys.append( (F"transformer.decoder.layers.{i}.sa_kpos_proj.weight", F"decoder.layers.{i}.sa_kpos_proj.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.sa_v_proj.weight", F"decoder.layers.{i}.sa_v_proj.weight")) rename_keys.append( (F"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", F"decoder.layers.{i}.ca_qcontent_proj.weight") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", F"decoder.layers.{i}.ca_kcontent_proj.weight") ) rename_keys.append( (F"transformer.decoder.layers.{i}.ca_kpos_proj.weight", F"decoder.layers.{i}.ca_kpos_proj.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.ca_v_proj.weight", F"decoder.layers.{i}.ca_v_proj.weight")) rename_keys.append( (F"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", F"decoder.layers.{i}.ca_qpos_sine_proj.weight") ) rename_keys.append( (F"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", F"decoder.layers.{i}.sa_qcontent_proj.bias") ) rename_keys.append( (F"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", F"decoder.layers.{i}.sa_kcontent_proj.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.sa_qpos_proj.bias", F"decoder.layers.{i}.sa_qpos_proj.bias")) rename_keys.append((F"transformer.decoder.layers.{i}.sa_kpos_proj.bias", F"decoder.layers.{i}.sa_kpos_proj.bias")) rename_keys.append((F"transformer.decoder.layers.{i}.sa_v_proj.bias", F"decoder.layers.{i}.sa_v_proj.bias")) rename_keys.append( (F"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", F"decoder.layers.{i}.ca_qcontent_proj.bias") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", F"decoder.layers.{i}.ca_kcontent_proj.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.ca_kpos_proj.bias", F"decoder.layers.{i}.ca_kpos_proj.bias")) rename_keys.append((F"transformer.decoder.layers.{i}.ca_v_proj.bias", F"decoder.layers.{i}.ca_v_proj.bias")) rename_keys.append( (F"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", F"decoder.layers.{i}.ca_qpos_sine_proj.bias") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', 
'''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''), ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''), ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''), ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''), ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''), ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''), ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''), ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''), ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''), ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''), ] ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[Any] ) -> Tuple: _a : Union[str, Any] =state_dict.pop(_UpperCAmelCase ) _a : List[Any] =val def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ) -> Any: _a : Optional[int] =OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _a : Tuple =key.replace("""backbone.0.body""" ,"""backbone.conv_encoder.model""" ) _a : Dict =value else: _a : Dict =value return new_state_dict def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str=False ) -> Optional[int]: _a : List[str] ="""""" if is_panoptic: _a : Union[str, Any] ="""conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _a : Optional[Any] =state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) _a : Optional[Any] =state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict _a : Union[str, Any] =in_proj_weight[:256, :] _a : Tuple =in_proj_bias[:256] _a : List[Any] =in_proj_weight[256:512, :] _a : List[str] =in_proj_bias[256:512] _a : int =in_proj_weight[-256:, :] _a : Dict =in_proj_bias[-256:] def SCREAMING_SNAKE_CASE_ ( ) -> Dict: _a : Union[str, Any] ="""http://images.cocodataset.org/val2017/000000039769.jpg""" _a : str =Image.open(requests.get(_UpperCAmelCase ,stream=_UpperCAmelCase ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Dict ) -> Tuple: _a : Optional[int] =ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: _a : Optional[Any] ="""resnet101""" if "dc5" in model_name: _a : Optional[Any] =True _a : List[Any] ="""panoptic""" in model_name if is_panoptic: _a : int =250 else: _a : Dict =91 _a : Optional[Any] ="""huggingface/label-files""" _a : Optional[Any] ="""coco-detection-id2label.json""" _a : Union[str, Any] =json.load(open(hf_hub_download(_UpperCAmelCase ,_UpperCAmelCase ,repo_type="""dataset""" ) ,"""r""" ) ) _a : Tuple 
={int(_UpperCAmelCase ): v for k, v in idalabel.items()} _a : List[Any] =idalabel _a : List[Any] ={v: k for k, v in idalabel.items()} # load image processor _a : Tuple ="""coco_panoptic""" if is_panoptic else """coco_detection""" _a : Optional[Any] =ConditionalDetrImageProcessor(format=_UpperCAmelCase ) # prepare image _a : int =prepare_img() _a : Union[str, Any] =image_processor(images=_UpperCAmelCase ,return_tensors="""pt""" ) _a : Tuple =encoding["""pixel_values"""] logger.info(F"Converting model {model_name}..." ) # load original model from torch hub _a : str =torch.hub.load("""DeppMeng/ConditionalDETR""" ,_UpperCAmelCase ,pretrained=_UpperCAmelCase ).eval() _a : str =conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: _a : Union[str, Any] ="""conditional_detr.""" + src rename_key(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) _a : List[Any] =rename_backbone_keys(_UpperCAmelCase ) # query, key and value matrices need special treatment read_in_q_k_v(_UpperCAmelCase ,is_panoptic=_UpperCAmelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _a : Tuple ="""conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): _a : Any =state_dict.pop(_UpperCAmelCase ) _a : Optional[Any] =val elif "class_labels_classifier" in key or "bbox_predictor" in key: _a : Optional[int] =state_dict.pop(_UpperCAmelCase ) _a : int =val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: _a : Any =state_dict.pop(_UpperCAmelCase ) _a : List[str] =val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): _a : List[str] =state_dict.pop(_UpperCAmelCase ) _a : str =val # finally, create HuggingFace model and load state dict _a : Union[str, Any] =ConditionalDetrForSegmentation(_UpperCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(_UpperCAmelCase ) model.load_state_dict(_UpperCAmelCase ) model.eval() model.push_to_hub(repo_id=_UpperCAmelCase ,organization="""DepuMeng""" ,commit_message="""Add model""" ) # verify our conversion _a : List[Any] =conditional_detr(_UpperCAmelCase ) _a : Tuple =model(_UpperCAmelCase ) assert torch.allclose(outputs.logits ,original_outputs["""pred_logits"""] ,atol=1e-4 ) assert torch.allclose(outputs.pred_boxes ,original_outputs["""pred_boxes"""] ,atol=1e-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks ,original_outputs["""pred_masks"""] ,atol=1e-4 ) # Save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": A__: str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''conditional_detr_resnet50''', type=str, help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) A__: Optional[Any] = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
'''simple docstring''' import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A__: Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : str=None ) -> Union[str, Any]: return field(default_factory=lambda: default ,metadata=_UpperCAmelCase ) @dataclass class A__ : __UpperCamelCase : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __UpperCamelCase : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __UpperCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __UpperCamelCase : str = field( default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , ) __UpperCamelCase : str = field( default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __UpperCamelCase : str = field( default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , ) __UpperCamelCase : str = field( default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , ) __UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' warnings.warn( f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , SCREAMING_SNAKE_CASE , ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[str]: '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
'''simple docstring'''

def solution(numerator: int = 1, digit: int = 1000) -> int:
    '''
    Find the denominator d, numerator <= d <= digit, for which the unit
    fraction numerator/d has the longest recurring cycle in its decimal part.

    >>> solution(1, 10)
    7
    '''
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
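# Note (added for illustration, not part of the original file): the function
# performs long division and records the remainders; once a remainder repeats,
# the decimal expansion of numerator/divide_by_number cycles, and the number
# of distinct remainders seen bounds the cycle length. The well-known answer
# for digit=1000 (Project Euler problem 26) is 983.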
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class A__ ( UpperCAmelCase__ ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Distribution , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :List[Any]=0 ) -> List[str]: '''simple docstring''' _a : int =1.0 if scale is None else scale _a : Optional[Any] =0.0 if loc is None else loc super().__init__(SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE )] ) @property def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return self.base_dist.mean * self.scale + self.loc @property def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return self.base_dist.variance * self.scale**2 @property def __UpperCAmelCase ( self :Any ) -> List[str]: '''simple docstring''' return self.variance.sqrt() class A__ ( nn.Module ): def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Callable[..., Tuple[torch.Tensor]] , **SCREAMING_SNAKE_CASE :Dict ) -> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) _a : Tuple =args_dim _a : Tuple =nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] ) _a : Dict =domain_map def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Tuple[torch.Tensor]: '''simple docstring''' _a : Tuple =[proj(SCREAMING_SNAKE_CASE ) for proj in self.proj] return self.domain_map(*SCREAMING_SNAKE_CASE ) class A__ ( nn.Module ): def __init__( self :Dict , SCREAMING_SNAKE_CASE :Tuple ) -> int: '''simple docstring''' super().__init__() _a : List[Any] =function def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[int] , *SCREAMING_SNAKE_CASE :int ) -> List[Any]: '''simple docstring''' return self.function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) class A__ : __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__( self :Any , SCREAMING_SNAKE_CASE :int = 1 ) -> None: '''simple docstring''' _a : Any =dim _a : List[Any] ={k: dim * self.args_dim[k] for k in self.args_dim} def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]: '''simple docstring''' if self.dim == 1: return self.distribution_class(*SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , ) -> Distribution: '''simple docstring''' _a : str =self._base_distribution(SCREAMING_SNAKE_CASE ) if loc is None and scale is None: return distr else: return AffineTransformed(SCREAMING_SNAKE_CASE , loc=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , event_dim=self.event_dim ) @property def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' return () if self.dim == 1 else (self.dim,) @property def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return len(self.event_shape ) @property def __UpperCAmelCase ( self :Any ) -> float: '''simple docstring''' return 0.0 def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> 
nn.Module: '''simple docstring''' return ParameterProjection( in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def __UpperCAmelCase ( self :int , *SCREAMING_SNAKE_CASE :torch.Tensor ) -> Any: '''simple docstring''' raise NotImplementedError() @staticmethod def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor: '''simple docstring''' return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0 class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def __UpperCAmelCase ( cls :int , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Union[str, Any]: '''simple docstring''' _a : Tuple =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) _a : Optional[Any] =2.0 + cls.squareplus(SCREAMING_SNAKE_CASE ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Dict: '''simple docstring''' _a : List[str] =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Optional[int]: '''simple docstring''' _a : int =cls.squareplus(SCREAMING_SNAKE_CASE ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Distribution: '''simple docstring''' _a , _a : Any =distr_args if self.dim == 1: return self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None ) -> Distribution: '''simple docstring''' _a , _a : Optional[int] =distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
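# Usage sketch (assumed; in this obfuscated copy the output classes were all
# renamed, so the class/method names below follow the conventions of
# transformers' time-series utilities and may not match exactly):
#
#     output = NormalOutput(dim=1)
#     proj = output.get_parameter_projection(in_features=32)
#     loc, scale = proj(torch.randn(8, 32))    # project features to params
#     distr = output.distribution((loc, scale))
#     samples = distr.sample()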
"""Conversion script for the LDM checkpoints."""
import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """
    Performs the global renaming and assigns the locally converted weights to the new
    checkpoint, splitting fused qkv attention tensors where requested.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    """Takes an original LDM state dict and a config, and returns a converted checkpoint."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
def set_bit(number: int, position: int) -> int:
    """
    Set the bit at ``position`` of ``number`` to 1.

    >>> set_bit(0b1101, 1)  # 0b1111
    15
    """
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """
    Set the bit at ``position`` of ``number`` to 0.

    >>> clear_bit(0b10010, 1)  # 0b10000
    16
    """
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """
    Flip the bit at ``position`` of ``number``.

    >>> flip_bit(0b101, 1)  # 0b111
    7
    """
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """
    Check whether the bit at ``position`` of ``number`` is set (1).

    >>> is_bit_set(0b1010, 1)
    True
    >>> is_bit_set(0b1010, 2)
    False
    """
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """
    Return the bit at ``position`` of ``number`` (0 or 1).

    >>> get_bit(0b1010, 0)
    0
    >>> get_bit(0b1010, 1)
    1
    """
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
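# Quick sanity check of the helpers above (illustrative values, added for
# clarity; not from the original file). With number = 0b1101 (decimal 13):
#   set_bit(0b1101, 1)    -> 0b1111 (15)
#   clear_bit(0b1101, 2)  -> 0b1001 (9)
#   flip_bit(0b1101, 0)   -> 0b1100 (12)
#   is_bit_set(0b1101, 3) -> True
#   get_bit(0b1101, 1)    -> 0   (bit 1 of 0b1101 is clear)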
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
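# Minimal round-trip sketch for JsonDatasetWriter / JsonDatasetReader (an
# illustrative assumption added for clarity; not one of the tests above):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   with io.BytesIO() as buf:
#       JsonDatasetWriter(ds, buf, lines=True).write()  # one JSON object per line
#       buf.seek(0)
#       rows = [json.loads(line) for line in buf]
#   assert rows == [{"col_1": "a", "col_2": 1}, {"col_1": "b", "col_2": 2}]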
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Compute the Manhattan (L1) distance between two points in the same
    n-dimensional space.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 1.5], [2, 2])
    1.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    """
    Validate that the input is a non-empty list of numbers.
    """
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    Same computation as manhattan_distance, written as a single expression.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
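# Worked examples for the helpers above (illustrative, added for clarity):
#   manhattan_distance([1, 1], [2, 2])         -> |1-2| + |1-2| = 2.0
#   manhattan_distance([1.5, 2.0], [3.0, 4.0]) -> 1.5 + 2.0     = 3.5
# Points of different lengths, e.g. ([1, 1], [2, 2, 2]), raise ValueError,
# and non-numeric entries raise TypeError via _validate_point.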