Dataset schema (one row per code / style-context pair):

column                   type     range
code                     string   lengths 86 – 54.5k
code_codestyle           int64    0 – 371
style_context            string   lengths 87 – 49.2k
style_context_codestyle  int64    0 – 349
label                    int64    0 – 1
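A minimal sketch of loading a dataset with this schema via the 🤗 `datasets` library; the repository id `user/code-style-pairs` is a hypothetical placeholder, since the actual dataset name is not shown in this dump:

```python
from datasets import load_dataset

# Hypothetical dataset id -- substitute the real repository name.
ds = load_dataset("user/code-style-pairs", split="train")

# Each row pairs a code snippet with a style-context snippet plus a binary label.
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code field
```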
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm _snake_case = re.compile('''[^A-Za-z_0-9]''') # parameters used in DuplicationIndex _snake_case = 10 _snake_case = 256 def _UpperCamelCase ( snake_case__ ) -> Optional[MinHash]: if len(snake_case__ ) < MIN_NUM_TOKENS: return None __UpperCAmelCase : List[str] = MinHash(num_perm=snake_case__ ) for token in set(snake_case__ ): min_hash.update(token.encode() ) return min_hash def _UpperCamelCase ( snake_case__ ) -> Set[str]: return {t for t in NON_ALPHA.split(snake_case__ ) if len(t.strip() ) > 0} class _snake_case : def __init__( self: List[Any] , *, __lowerCamelCase: float = 0.85 , ) -> Any: __UpperCAmelCase : Union[str, Any] = duplication_jaccard_threshold __UpperCAmelCase : Dict = NUM_PERM __UpperCAmelCase : List[Any] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) __UpperCAmelCase : Tuple = defaultdict(__lowerCamelCase ) def _lowerCamelCase ( self: int , __lowerCamelCase: Tuple , __lowerCamelCase: MinHash ) -> None: __UpperCAmelCase : Any = self._index.query(__lowerCamelCase ) if code_key in self._index.keys: print(f'''Duplicate key {code_key}''' ) return self._index.insert(__lowerCamelCase , __lowerCamelCase ) if len(__lowerCamelCase ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(__lowerCamelCase ) break else: self._duplicate_clusters[close_duplicates[0]].add(__lowerCamelCase ) def _lowerCamelCase ( self: int ) -> List[List[Dict]]: __UpperCAmelCase : str = [] for base, duplicates in self._duplicate_clusters.items(): __UpperCAmelCase : int = [base] + list(__lowerCamelCase ) # reformat the cluster to be a list of dict __UpperCAmelCase : Union[str, Any] = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster] duplicate_clusters.append(__lowerCamelCase ) return duplicate_clusters def _lowerCamelCase ( self: str , __lowerCamelCase: int ) -> None: __UpperCAmelCase : List[Any] = self.get_duplicate_clusters() with open(__lowerCamelCase , "w" ) as f: json.dump(__lowerCamelCase , __lowerCamelCase ) def _UpperCamelCase ( snake_case__ ) -> int: __UpperCAmelCase , __UpperCAmelCase : Tuple = element __UpperCAmelCase : Union[str, Any] = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def _UpperCamelCase ( snake_case__ ) -> Optional[int]: with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash, ThreadedIterator(snake_case__, max_queue_size=1_0000 ), chunksize=100, ): if data is not None: yield data def _UpperCamelCase ( snake_case__, snake_case__ ) -> Tuple: __UpperCAmelCase : Any = DuplicationIndex(duplication_jaccard_threshold=snake_case__ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(snake_case__ ) ), max_queue_size=100 ) ): di.add(snake_case__, snake_case__ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def _UpperCamelCase ( snake_case__, snake_case__ ) -> float: __UpperCAmelCase : List[Any] = get_tokens(snake_case__ ) __UpperCAmelCase : List[Any] = get_tokens(snake_case__ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) _snake_case = None def _UpperCamelCase ( snake_case__, snake_case__ ) -> List[str]: __UpperCAmelCase : Tuple = [] for elementa in cluster: __UpperCAmelCase : List[Any] = _shared_dataset[elementa["base_index"]]["content"] for elementa in extremes: __UpperCAmelCase : Union[str, Any] = _shared_dataset[elementa["base_index"]]["content"] if jaccard_similarity(snake_case__, snake_case__ ) >= jaccard_threshold: elementa["copies"] += 1 break else: __UpperCAmelCase : Any = 1 extremes.append(snake_case__ ) return extremes def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict: global _shared_dataset __UpperCAmelCase : Any = dataset __UpperCAmelCase : str = [] __UpperCAmelCase : Dict = partial(_find_cluster_extremes_shared, jaccard_threshold=snake_case__ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( snake_case__, snake_case__, ), total=len(snake_case__ ), ): extremes_list.append(snake_case__ ) return extremes_list def _UpperCamelCase ( snake_case__, snake_case__ = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]: __UpperCAmelCase : Any = make_duplicate_clusters(snake_case__, snake_case__ ) __UpperCAmelCase : Any = {x["base_index"] for cluster in duplicate_clusters for x in cluster} __UpperCAmelCase : List[str] = {} __UpperCAmelCase : int = find_extremes(snake_case__, snake_case__, snake_case__ ) for extremes in extremes_clusters: for element in extremes: __UpperCAmelCase : Union[str, Any] = element __UpperCAmelCase : Tuple = duplicate_indices - set(extreme_dict.keys() ) __UpperCAmelCase : List[Any] = dataset.filter(lambda snake_case__, snake_case__ : idx not in remove_indices, with_indices=snake_case__ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: __UpperCAmelCase : Union[str, Any] = element["base_index"] in extreme_dict if element["is_extreme"]: __UpperCAmelCase : Union[str, Any] = extreme_dict[element["base_index"]]["copies"] print(f'''Original dataset size: {len(snake_case__ )}''' ) print(f'''Number of duplicate clusters: {len(snake_case__ )}''' ) print(f'''Files in duplicate cluster: {len(snake_case__ )}''' ) print(f'''Unique files in duplicate cluster: {len(snake_case__ )}''' ) print(f'''Filtered dataset size: {len(snake_case__ )}''' ) return ds_filter, duplicate_clusters
342
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def _UpperCamelCase ( snake_case__ ) -> Tuple: __UpperCAmelCase : Union[str, Any] = 384 if "tiny" in model_name: __UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3] __UpperCAmelCase : List[Any] = [96, 192, 384, 768] if "small" in model_name: __UpperCAmelCase : Tuple = [3, 3, 27, 3] __UpperCAmelCase : Any = [96, 192, 384, 768] if "base" in model_name: __UpperCAmelCase : str = [3, 3, 27, 3] __UpperCAmelCase : str = [128, 256, 512, 1024] __UpperCAmelCase : str = 512 if "large" in model_name: __UpperCAmelCase : Dict = [3, 3, 27, 3] __UpperCAmelCase : int = [192, 384, 768, 1536] __UpperCAmelCase : Dict = 768 if "xlarge" in model_name: __UpperCAmelCase : List[Any] = [3, 3, 27, 3] __UpperCAmelCase : Tuple = [256, 512, 1024, 2048] __UpperCAmelCase : int = 1024 # set label information __UpperCAmelCase : List[Any] = 150 __UpperCAmelCase : str = "huggingface/label-files" __UpperCAmelCase : List[Any] = "ade20k-id2label.json" __UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) ) __UpperCAmelCase : str = {int(snake_case__ ): v for k, v in idalabel.items()} __UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} __UpperCAmelCase : int = ConvNextConfig( depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] ) __UpperCAmelCase : int = UperNetConfig( backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, ) return config def _UpperCamelCase ( snake_case__ ) -> Tuple: __UpperCAmelCase : Optional[int] = [] # fmt: off # stem rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") ) rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") ) rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") ) rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', 
f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any: __UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ ) __UpperCAmelCase : Optional[int] = val def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]: __UpperCAmelCase : Dict = { "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth", "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth", "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth", "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth", "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth", } __UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name] __UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"] __UpperCAmelCase : Dict = get_upernet_config(snake_case__ ) __UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): __UpperCAmelCase : str = state_dict.pop(snake_case__ ) if "bn" in key: __UpperCAmelCase : int = key.replace("bn", "batch_norm" ) __UpperCAmelCase : Union[str, Any] = val # rename keys __UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__, snake_case__, snake_case__ ) model.load_state_dict(snake_case__ ) # verify on image __UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" __UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" ) 
__UpperCAmelCase : str = SegformerImageProcessor() __UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values with torch.no_grad(): __UpperCAmelCase : Union[str, Any] = model(snake_case__ ) if model_name == "upernet-convnext-tiny": __UpperCAmelCase : Any = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": __UpperCAmelCase : Optional[Any] = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": __UpperCAmelCase : Dict = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": __UpperCAmelCase : Tuple = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": __UpperCAmelCase : Union[str, Any] = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print("Logits:", outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case__ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(snake_case__ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-convnext-tiny''', type=str, choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']], help='''Name of the ConvNext UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _snake_case = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
342
1
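The first sample in the row above deduplicates code with a MinHash-LSH index from `datasketch`. A minimal sketch of the same pattern, using the sample's non-alphanumeric tokenisation, its 256 permutations, and its 0.85 Jaccard threshold:

```python
import re
from datasketch import MinHash, MinHashLSH

NON_ALPHA = re.compile(r"[^A-Za-z_0-9]")

def min_hash(text: str, num_perm: int = 256) -> MinHash:
    m = MinHash(num_perm=num_perm)
    for token in {t for t in NON_ALPHA.split(text) if t.strip()}:
        m.update(token.encode())
    return m

lsh = MinHashLSH(threshold=0.85, num_perm=256)
docs = {
    "a": "def add(x, y): return x + y",
    "b": "def add(x, y):\n    return x + y",  # same tokens, different layout
}
for key, text in docs.items():
    candidates = lsh.query(min_hash(text))  # near-duplicates already indexed
    print(key, "duplicates:", candidates)   # b should report ['a']: token sets match
    lsh.insert(key, min_hash(text))
```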
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all BART models at https://huggingface.co/models?filter=bart _snake_case = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, } _snake_case = { '''facebook/bart-base''': 1024, '''facebook/bart-large''': 1024, '''facebook/bart-large-mnli''': 1024, '''facebook/bart-large-cnn''': 1024, '''facebook/bart-large-xsum''': 1024, '''yjernite/bart_eli5''': 1024, } @lru_cache() def _UpperCamelCase ( ) -> Tuple: __UpperCAmelCase : Optional[Any] = ( list(range(ord("!" 
), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) ) ) __UpperCAmelCase : Any = bs[:] __UpperCAmelCase : Tuple = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case__ ) cs.append(2**8 + n ) n += 1 __UpperCAmelCase : Union[str, Any] = [chr(snake_case__ ) for n in cs] return dict(zip(snake_case__, snake_case__ ) ) def _UpperCamelCase ( snake_case__ ) -> Optional[Any]: __UpperCAmelCase : Any = set() __UpperCAmelCase : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase : Optional[int] = char return pairs class _snake_case ( _lowercase ): lowerCamelCase__: Optional[int] = VOCAB_FILES_NAMES lowerCamelCase__: Any = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__: str = ["input_ids", "attention_mask"] def __init__( self: Optional[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: int , __lowerCamelCase: Optional[int]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: Optional[int]="</s>" , __lowerCamelCase: Optional[Any]="</s>" , __lowerCamelCase: Optional[int]="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: int="<mask>" , __lowerCamelCase: List[Any]=False , **__lowerCamelCase: Tuple , ) -> Optional[int]: __UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token __UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token __UpperCAmelCase : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token __UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token __UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token __UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __UpperCAmelCase : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: __UpperCAmelCase : int = json.load(__lowerCamelCase ) __UpperCAmelCase : List[Any] = {v: k for k, v in self.encoder.items()} __UpperCAmelCase : Optional[int] = errors # how to handle errors in decoding __UpperCAmelCase : Tuple = bytes_to_unicode() __UpperCAmelCase : str = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: __UpperCAmelCase : Optional[int] = merges_handle.read().split("\n" )[1:-1] __UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges] __UpperCAmelCase : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) __UpperCAmelCase : Optional[int] = {} __UpperCAmelCase : List[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCAmelCase : List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property def _lowerCamelCase ( self: Dict ) -> Union[str, Any]: return len(self.encoder ) def _lowerCamelCase ( self: Optional[Any] ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCamelCase ( self: str , __lowerCamelCase: Any ) -> Dict: if token in self.cache: return self.cache[token] __UpperCAmelCase : Any = tuple(__lowerCamelCase ) __UpperCAmelCase : List[Any] = get_pairs(__lowerCamelCase ) if not pairs: return token while True: __UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = bigram __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : Optional[int] = 0 while i < len(__lowerCamelCase ): try: __UpperCAmelCase : str = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase : Dict = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase : int = tuple(__lowerCamelCase ) __UpperCAmelCase : Tuple = new_word if len(__lowerCamelCase ) == 1: break else: __UpperCAmelCase : Dict = get_pairs(__lowerCamelCase ) __UpperCAmelCase : List[Any] = " ".join(__lowerCamelCase ) __UpperCAmelCase : str = word return word def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Dict ) -> Dict: __UpperCAmelCase : int = [] for token in re.findall(self.pat , __lowerCamelCase ): __UpperCAmelCase : Dict = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _lowerCamelCase ( self: Any , __lowerCamelCase: str ) -> Tuple: return self.encoder.get(__lowerCamelCase , 
self.encoder.get(self.unk_token ) ) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: List[Any] ) -> Tuple: return self.decoder.get(__lowerCamelCase ) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: List[str] ) -> Tuple: __UpperCAmelCase : Optional[Any] = "".join(__lowerCamelCase ) __UpperCAmelCase : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _lowerCamelCase ( self: Tuple , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase : Optional[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __UpperCAmelCase : Union[str, Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) __UpperCAmelCase : Tuple = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) __UpperCAmelCase : Any = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase : Optional[Any] = [self.cls_token_id] __UpperCAmelCase : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]: __UpperCAmelCase : Dict = [self.sep_token_id] __UpperCAmelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any]=False , **__lowerCamelCase: int ) -> List[Any]: __UpperCAmelCase : Dict = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): __UpperCAmelCase : int = " " + text return (text, kwargs)
342
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''', } class _snake_case ( _lowercase ): lowerCamelCase__: Dict = "roc_bert" def __init__( self: int , __lowerCamelCase: Union[str, Any]=3_05_22 , __lowerCamelCase: int=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: int=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Any=5_12 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: str=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]="absolute" , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=9_10 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=2_48_58 , __lowerCamelCase: Optional[int]=True , **__lowerCamelCase: Any , ) -> List[Any]: __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Dict = max_position_embeddings __UpperCAmelCase : Optional[Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[str] = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : Optional[int] = attention_probs_dropout_prob __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : Optional[Any] = type_vocab_size __UpperCAmelCase : List[Any] = layer_norm_eps __UpperCAmelCase : Optional[int] = use_cache __UpperCAmelCase : Optional[Any] = enable_pronunciation __UpperCAmelCase : Any = enable_shape __UpperCAmelCase : Union[str, Any] = pronunciation_embed_dim __UpperCAmelCase : Optional[Any] = pronunciation_vocab_size __UpperCAmelCase : Optional[Any] = shape_embed_dim __UpperCAmelCase : List[Any] = shape_vocab_size __UpperCAmelCase : int = concat_input __UpperCAmelCase : int = position_embedding_type __UpperCAmelCase : Optional[int] = classifier_dropout super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
342
1
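The first sample in the row above is a byte-level BPE tokenizer; its `get_pairs` helper produces the adjacent symbol pairs that BPE repeatedly ranks and merges. Reproduced standalone:

```python
def get_pairs(word: tuple) -> set:
    """Return all adjacent symbol pairs in a word, used to rank BPE merges."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

# "hello" as individual symbols before any merge:
print(get_pairs(tuple("hello")))
# e.g. {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')} (set order may vary)
```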
import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) _snake_case = logging.getLogger() def _UpperCamelCase ( ) -> Union[str, Any]: __UpperCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument("-f" ) __UpperCAmelCase : Tuple = parser.parse_args() return args.f class _snake_case ( _lowercase ): def _lowerCamelCase ( self: Tuple ) -> None: __UpperCAmelCase : Dict = logging.StreamHandler(sys.stdout ) logger.addHandler(__lowerCamelCase ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Any ) -> Optional[int]: __UpperCAmelCase : Optional[Any] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , "run_glue_deebert.py" ) with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): __UpperCAmelCase : Any = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(__lowerCamelCase , 0.6_66 ) @slow @require_torch_non_multi_gpu def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]: __UpperCAmelCase : Optional[int] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split() self.run_and_check(__lowerCamelCase ) __UpperCAmelCase : int = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(__lowerCamelCase ) __UpperCAmelCase : Any = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(__lowerCamelCase )
342
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def _UpperCamelCase ( snake_case__ ) -> int: __UpperCAmelCase : int = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: __UpperCAmelCase : int = [144, 192, 240] __UpperCAmelCase : Optional[Any] = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: __UpperCAmelCase : Optional[Any] = [96, 120, 144] __UpperCAmelCase : Tuple = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: __UpperCAmelCase : str = [64, 80, 96] __UpperCAmelCase : Optional[Any] = [16, 16, 24, 48, 64, 80, 320] __UpperCAmelCase : Tuple = 0.05 __UpperCAmelCase : Dict = 2.0 if mobilevit_name.startswith("deeplabv3_" ): __UpperCAmelCase : str = 512 __UpperCAmelCase : Any = 16 __UpperCAmelCase : str = 21 __UpperCAmelCase : Union[str, Any] = "pascal-voc-id2label.json" else: __UpperCAmelCase : Optional[Any] = 1000 __UpperCAmelCase : int = "imagenet-1k-id2label.json" __UpperCAmelCase : Dict = "huggingface/label-files" __UpperCAmelCase : int = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) ) __UpperCAmelCase : Any = {int(snake_case__ ): v for k, v in idalabel.items()} __UpperCAmelCase : int = idalabel __UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()} return config def _UpperCamelCase ( snake_case__, snake_case__=False ) -> Tuple: for i in range(1, 6 ): if f'''layer_{i}.''' in name: __UpperCAmelCase : Tuple = name.replace(f'''layer_{i}.''', f'''encoder.layer.{i - 1}.''' ) if "conv_1." in name: __UpperCAmelCase : Dict = name.replace("conv_1.", "conv_stem." ) if ".block." in name: __UpperCAmelCase : Optional[int] = name.replace(".block.", "." ) if "exp_1x1" in name: __UpperCAmelCase : Tuple = name.replace("exp_1x1", "expand_1x1" ) if "red_1x1" in name: __UpperCAmelCase : Optional[Any] = name.replace("red_1x1", "reduce_1x1" ) if ".local_rep.conv_3x3." in name: __UpperCAmelCase : Optional[int] = name.replace(".local_rep.conv_3x3.", ".conv_kxk." ) if ".local_rep.conv_1x1." in name: __UpperCAmelCase : Any = name.replace(".local_rep.conv_1x1.", ".conv_1x1." ) if ".norm." in name: __UpperCAmelCase : Dict = name.replace(".norm.", ".normalization." ) if ".conv." in name: __UpperCAmelCase : List[Any] = name.replace(".conv.", ".convolution." ) if ".conv_proj." in name: __UpperCAmelCase : List[str] = name.replace(".conv_proj.", ".conv_projection." 
) for i in range(0, 2 ): for j in range(0, 4 ): if f'''.{i}.{j}.''' in name: __UpperCAmelCase : List[Any] = name.replace(f'''.{i}.{j}.''', f'''.{i}.layer.{j}.''' ) for i in range(2, 6 ): for j in range(0, 4 ): if f'''.{i}.{j}.''' in name: __UpperCAmelCase : Any = name.replace(f'''.{i}.{j}.''', f'''.{i}.''' ) if "expand_1x1" in name: __UpperCAmelCase : Optional[int] = name.replace("expand_1x1", "downsampling_layer.expand_1x1" ) if "conv_3x3" in name: __UpperCAmelCase : List[Any] = name.replace("conv_3x3", "downsampling_layer.conv_3x3" ) if "reduce_1x1" in name: __UpperCAmelCase : Dict = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1" ) for i in range(2, 5 ): if f'''.global_rep.{i}.weight''' in name: __UpperCAmelCase : Any = name.replace(f'''.global_rep.{i}.weight''', ".layernorm.weight" ) if f'''.global_rep.{i}.bias''' in name: __UpperCAmelCase : Optional[Any] = name.replace(f'''.global_rep.{i}.bias''', ".layernorm.bias" ) if ".global_rep." in name: __UpperCAmelCase : Tuple = name.replace(".global_rep.", ".transformer." ) if ".pre_norm_mha.0." in name: __UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_mha.0.", ".layernorm_before." ) if ".pre_norm_mha.1.out_proj." in name: __UpperCAmelCase : Tuple = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense." ) if ".pre_norm_ffn.0." in name: __UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_ffn.0.", ".layernorm_after." ) if ".pre_norm_ffn.1." in name: __UpperCAmelCase : Dict = name.replace(".pre_norm_ffn.1.", ".intermediate.dense." ) if ".pre_norm_ffn.4." in name: __UpperCAmelCase : int = name.replace(".pre_norm_ffn.4.", ".output.dense." ) if ".transformer." in name: __UpperCAmelCase : Tuple = name.replace(".transformer.", ".transformer.layer." ) if ".aspp_layer." in name: __UpperCAmelCase : Any = name.replace(".aspp_layer.", "." ) if ".aspp_pool." in name: __UpperCAmelCase : Optional[Any] = name.replace(".aspp_pool.", "." ) if "seg_head." in name: __UpperCAmelCase : Optional[int] = name.replace("seg_head.", "segmentation_head." ) if "segmentation_head.classifier.classifier." in name: __UpperCAmelCase : str = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier." ) if "classifier.fc." in name: __UpperCAmelCase : Optional[Any] = name.replace("classifier.fc.", "classifier." ) elif (not base_model) and ("segmentation_head." not in name): __UpperCAmelCase : List[str] = "mobilevit." + name return name def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=False ) -> Union[str, Any]: if base_model: __UpperCAmelCase : Optional[int] = "" else: __UpperCAmelCase : Tuple = "mobilevit." for key in orig_state_dict.copy().keys(): __UpperCAmelCase : Optional[int] = orig_state_dict.pop(snake_case__ ) if key[:8] == "encoder.": __UpperCAmelCase : str = key[8:] if "qkv" in key: __UpperCAmelCase : Tuple = key.split("." 
) __UpperCAmelCase : List[Any] = int(key_split[0][6:] ) - 1 __UpperCAmelCase : Optional[Any] = int(key_split[3] ) __UpperCAmelCase : Tuple = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' ) __UpperCAmelCase : List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size __UpperCAmelCase : Optional[Any] = ( f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.''' ) if "weight" in key: __UpperCAmelCase : Any = val[:dim, :] __UpperCAmelCase : Any = val[dim : dim * 2, :] __UpperCAmelCase : List[Any] = val[-dim:, :] else: __UpperCAmelCase : List[str] = val[:dim] __UpperCAmelCase : Optional[Any] = val[dim : dim * 2] __UpperCAmelCase : List[Any] = val[-dim:] else: __UpperCAmelCase : str = val return orig_state_dict def _UpperCamelCase ( ) -> Any: __UpperCAmelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" __UpperCAmelCase : List[str] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ) return im @torch.no_grad() def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=False ) -> Optional[Any]: __UpperCAmelCase : Tuple = get_mobilevit_config(snake_case__ ) # load original state_dict __UpperCAmelCase : str = torch.load(snake_case__, map_location="cpu" ) # load 🤗 model if mobilevit_name.startswith("deeplabv3_" ): __UpperCAmelCase : Optional[int] = MobileViTForSemanticSegmentation(snake_case__ ).eval() else: __UpperCAmelCase : List[Any] = MobileViTForImageClassification(snake_case__ ).eval() __UpperCAmelCase : Dict = convert_state_dict(snake_case__, snake_case__ ) model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor __UpperCAmelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 ) __UpperCAmelCase : Any = image_processor(images=prepare_img(), return_tensors="pt" ) __UpperCAmelCase : Dict = model(**snake_case__ ) __UpperCAmelCase : Tuple = outputs.logits if mobilevit_name.startswith("deeplabv3_" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": __UpperCAmelCase : int = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": __UpperCAmelCase : Tuple = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": __UpperCAmelCase : Any = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ] ) else: raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3, :3, :3], snake_case__, atol=1e-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": __UpperCAmelCase : str = torch.tensor([-0.9866, 0.2392, -1.1241] ) elif mobilevit_name == "mobilevit_xs": __UpperCAmelCase : Tuple = torch.tensor([-2.4761, 
-0.9399, -1.9587] ) elif mobilevit_name == "mobilevit_xxs": __UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ) else: raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3], snake_case__, atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case__ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case__ ) if push_to_hub: __UpperCAmelCase : List[str] = { "mobilevit_s": "mobilevit-small", "mobilevit_xs": "mobilevit-x-small", "mobilevit_xxs": "mobilevit-xx-small", "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small", "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small", "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small", } print("Pushing to the hub..." ) __UpperCAmelCase : int = model_mapping[mobilevit_name] image_processor.push_to_hub(snake_case__, organization="apple" ) model.push_to_hub(snake_case__, organization="apple" ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--mobilevit_name''', default='''mobilevit_s''', type=str, help=( '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',''' ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.''' ), ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _snake_case = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
342
1
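The conversion scripts in this dump all follow the same checkpoint-porting pattern: pop each key from the original `state_dict`, rewrite it, and re-insert it under the new name. A stripped-down sketch of that pattern; the two rename rules below are illustrative only, not the real MobileViT mapping:

```python
import torch

def rename_key(name: str) -> str:
    # Illustrative rules only -- real conversions use model-specific tables.
    if name.startswith("conv_1."):
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    return name

state_dict = {"conv_1.weight": torch.zeros(3), "layer.block.bias": torch.zeros(3)}
for key in list(state_dict.keys()):
    state_dict[rename_key(key)] = state_dict.pop(key)
print(sorted(state_dict))  # ['conv_stem.weight', 'layer.bias']
```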
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _snake_case = logging.get_logger(__name__) _snake_case = { '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class _snake_case ( _lowercase , _lowercase ): lowerCamelCase__: Any = "swin" lowerCamelCase__: List[str] = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self: Optional[Any] , __lowerCamelCase: str=2_24 , __lowerCamelCase: Optional[Any]=4 , __lowerCamelCase: Union[str, Any]=3 , __lowerCamelCase: Union[str, Any]=96 , __lowerCamelCase: Union[str, Any]=[2, 2, 6, 2] , __lowerCamelCase: Optional[Any]=[3, 6, 12, 24] , __lowerCamelCase: str=7 , __lowerCamelCase: Tuple=4.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.1 , __lowerCamelCase: List[Any]="gelu" , __lowerCamelCase: Optional[Any]=False , __lowerCamelCase: str=0.02 , __lowerCamelCase: Any=1e-5 , __lowerCamelCase: List[Any]=32 , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: Union[str, Any]=None , **__lowerCamelCase: Optional[Any] , ) -> Union[str, Any]: super().__init__(**__lowerCamelCase ) __UpperCAmelCase : Any = image_size __UpperCAmelCase : Optional[Any] = patch_size __UpperCAmelCase : List[Any] = num_channels __UpperCAmelCase : Any = embed_dim __UpperCAmelCase : str = depths __UpperCAmelCase : Tuple = len(__lowerCamelCase ) __UpperCAmelCase : List[Any] = num_heads __UpperCAmelCase : Optional[int] = window_size __UpperCAmelCase : Optional[int] = mlp_ratio __UpperCAmelCase : str = qkv_bias __UpperCAmelCase : Any = hidden_dropout_prob __UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob __UpperCAmelCase : Dict = drop_path_rate __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[int] = use_absolute_embeddings __UpperCAmelCase : Dict = layer_norm_eps __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : str = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __UpperCAmelCase : Any = int(embed_dim * 2 ** (len(__lowerCamelCase ) - 1) ) __UpperCAmelCase : List[str] = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(__lowerCamelCase ) + 1 )] __UpperCAmelCase , __UpperCAmelCase : List[str] = get_aligned_output_features_output_indices( out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names ) class _snake_case ( _lowercase ): lowerCamelCase__: Optional[Any] = version.parse("1.11" ) @property def _lowerCamelCase ( self: Tuple ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _lowerCamelCase ( self: int ) -> float: return 1e-4
342
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    # Expected number of distinct colours among num_picked balls drawn
    # from NUM_COLOURS colours of BALLS_PER_COLOUR balls each.
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
342
1
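The last sample in the row above computes, by linearity of expectation over the indicator that a fixed colour is entirely missed, the expected number of distinct colours among 20 balls drawn from 7 colours of 10 balls each: 7 · (1 − C(60,20)/C(70,20)). A quick check:

```python
import math

total = math.comb(70, 20)        # all ways to draw 20 of 70 balls
missing = math.comb(60, 20)      # draws that avoid one fixed colour entirely
expected_colours = 7 * (1 - missing / total)
print(f"{expected_colours:.9f}")  # 6.818741802
```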
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, quantity: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(quantity))


def random_number(chars_incl, quantity):
    pass  # Put your code here...


def random_letters(chars_incl, quantity):
    pass  # Put your code here...


def random_characters(chars_incl, quantity):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    # Passwords should contain UPPERCASE, lowercase, numbers, and special characters.
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char


def main() -> None:
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, you had better save it.]")


if __name__ == "__main__":
    main()
342
def longest_distance(graph) -> None:
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    # Count incoming edges for every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Seed the queue with all sources (Kahn's algorithm).
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
342
1
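For the example DAG in the longest-path sample above, the longest chain is 0 → 3 → 5 → 6 → 7, so the printed answer should be 5 (distances count vertices, starting at 1). A tiny brute-force DFS cross-check:

```python
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}

def longest_chain(v: int) -> int:
    # Number of vertices on the longest path starting at v.
    return 1 + max((longest_chain(nxt) for nxt in graph[v]), default=0)

print(max(longest_chain(v) for v in graph))  # 5
```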
from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar _snake_case = TypeVar('''T''') class _snake_case ( Generic[T] ): def __init__( self: List[str] , __lowerCamelCase: list[T] , __lowerCamelCase: Callable[[T, T], T] ) -> None: __UpperCAmelCase : Any | T = None __UpperCAmelCase : int = len(__lowerCamelCase ) __UpperCAmelCase : list[T] = [any_type for _ in range(self.N )] + arr __UpperCAmelCase : str = fnc self.build() def _lowerCamelCase ( self: Union[str, Any] ) -> None: for p in range(self.N - 1 , 0 , -1 ): __UpperCAmelCase : Optional[int] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def _lowerCamelCase ( self: str , __lowerCamelCase: int , __lowerCamelCase: T ) -> None: p += self.N __UpperCAmelCase : List[str] = v while p > 1: __UpperCAmelCase : Optional[int] = p // 2 __UpperCAmelCase : List[str] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def _lowerCamelCase ( self: str , __lowerCamelCase: int , __lowerCamelCase: int ) -> T | None: # noqa: E741 __UpperCAmelCase , __UpperCAmelCase : List[Any] = l + self.N, r + self.N __UpperCAmelCase : T | None = None while l <= r: if l % 2 == 1: __UpperCAmelCase : Any = self.st[l] if res is None else self.fn(__lowerCamelCase , self.st[l] ) if r % 2 == 0: __UpperCAmelCase : str = self.st[r] if res is None else self.fn(__lowerCamelCase , self.st[r] ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce _snake_case = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] _snake_case = { 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } _snake_case = SegmentTree(test_array, min) _snake_case = SegmentTree(test_array, max) _snake_case = SegmentTree(test_array, lambda a, b: a + b) def _UpperCamelCase ( ) -> None: for i in range(len(snake_case__ ) ): for j in range(snake_case__, len(snake_case__ ) ): __UpperCAmelCase : List[Any] = reduce(snake_case__, test_array[i : j + 1] ) __UpperCAmelCase : Optional[int] = reduce(snake_case__, test_array[i : j + 1] ) __UpperCAmelCase : Optional[int] = reduce(lambda snake_case__, snake_case__ : a + b, test_array[i : j + 1] ) assert min_range == min_segment_tree.query(snake_case__, snake_case__ ) assert max_range == max_segment_tree.query(snake_case__, snake_case__ ) assert sum_range == sum_segment_tree.query(snake_case__, snake_case__ ) test_all_segments() for index, value in test_updates.items(): _snake_case = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
342
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _snake_case = { '''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''], '''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''], '''processing_whisper''': ['''WhisperProcessor'''], '''tokenization_whisper''': ['''WhisperTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['''WhisperTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''WhisperForConditionalGeneration''', '''WhisperModel''', '''WhisperPreTrainedModel''', '''WhisperForAudioClassification''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWhisperForConditionalGeneration''', '''TFWhisperModel''', '''TFWhisperPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''FlaxWhisperForConditionalGeneration''', '''FlaxWhisperModel''', '''FlaxWhisperPreTrainedModel''', '''FlaxWhisperForAudioClassification''', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
342
1
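The generic segment tree above uses the classic iterative array layout: leaf i sits at index N + i and node p combines children 2p and 2p + 1. A minimal sum-only sketch of the same bottom-up scheme:

```python
class SumSegmentTree:
    def __init__(self, arr):
        self.n = len(arr)
        self.tree = [0] * self.n + list(arr)  # leaves at indices n..2n-1
        for p in range(self.n - 1, 0, -1):
            self.tree[p] = self.tree[2 * p] + self.tree[2 * p + 1]

    def update(self, i, value):
        p = i + self.n
        self.tree[p] = value
        while p > 1:
            p //= 2
            self.tree[p] = self.tree[2 * p] + self.tree[2 * p + 1]

    def query(self, l, r):  # sum over the inclusive range [l, r]
        res = 0
        l += self.n
        r += self.n
        while l <= r:
            if l % 2 == 1:  # l is a right child: take it and step inward
                res += self.tree[l]
            if r % 2 == 0:  # r is a left child: take it and step inward
                res += self.tree[r]
            l, r = (l + 1) // 2, (r - 1) // 2
        return res

st = SumSegmentTree([1, 10, -2, 9])
print(st.query(0, 3))  # 18
st.update(2, 5)
print(st.query(1, 2))  # 15
```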
from __future__ import annotations

from math import pi


def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    # Solve X_L = 2*pi*f*L for whichever of the three quantities is passed as 0.
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
342
from __future__ import annotations from math import pi def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> dict[str, float]: if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if inductance < 0: raise ValueError("Inductance cannot be negative" ) if frequency < 0: raise ValueError("Frequency cannot be negative" ) if reactance < 0: raise ValueError("Inductive reactance cannot be negative" ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
342
1
import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def _UpperCamelCase ( snake_case__, snake_case__=7 ) -> Optional[Any]: __UpperCAmelCase : Dict = None if token is not None: __UpperCAmelCase : Any = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''} # The id of a workflow (not of a workflow run) __UpperCAmelCase : List[Any] = "636036" __UpperCAmelCase : str = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs''' # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}''' __UpperCAmelCase : Tuple = requests.get(snake_case__, headers=snake_case__ ).json() return result["workflow_runs"] def _UpperCamelCase ( snake_case__ ) -> Tuple: __UpperCAmelCase : Any = get_daily_ci_runs(snake_case__ ) __UpperCAmelCase : Dict = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": __UpperCAmelCase : Optional[Any] = workflow_run["id"] break return workflow_run_id def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> List[str]: __UpperCAmelCase : Optional[int] = get_last_daily_ci_runs(snake_case__ ) if workflow_run_id is not None: __UpperCAmelCase : List[Any] = get_artifacts_links(worflow_run_id=snake_case__, token=snake_case__ ) for artifact_name in artifact_names: if artifact_name in artifacts_links: __UpperCAmelCase : List[Any] = artifacts_links[artifact_name] download_artifact( artifact_name=snake_case__, artifact_url=snake_case__, output_dir=snake_case__, token=snake_case__ ) def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any: get_last_daily_ci_artifacts(snake_case__, snake_case__, snake_case__ ) __UpperCAmelCase : Union[str, Any] = {} for artifact_name in artifact_names: __UpperCAmelCase : Dict = os.path.join(snake_case__, f'''{artifact_name}.zip''' ) if os.path.isfile(snake_case__ ): __UpperCAmelCase : int = {} with zipfile.ZipFile(snake_case__ ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case__ ): # read the file with z.open(snake_case__ ) as f: __UpperCAmelCase : Optional[Any] = f.read().decode("UTF-8" ) return results
342
import flax.linen as nn import jax import jax.numpy as jnp class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]: __UpperCAmelCase : List[str] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape __UpperCAmelCase : Dict = jax.image.resize( __lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , ) __UpperCAmelCase : Dict = self.conv(__lowerCamelCase ) return hidden_states class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: str ) -> Any: __UpperCAmelCase : Optional[int] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) __UpperCAmelCase : Any = self.conv(__lowerCamelCase ) return hidden_states class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: int = None lowerCamelCase__: float = 0.0 lowerCamelCase__: bool = None lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: str ) -> List[str]: __UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels __UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) __UpperCAmelCase : List[str] = nn.Conv( __lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype ) __UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) __UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob ) __UpperCAmelCase : Tuple = nn.Conv( __lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __UpperCAmelCase : List[Any] = None if use_nin_shortcut: __UpperCAmelCase : Dict = nn.Conv( __lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , ) def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]: __UpperCAmelCase : Dict = hidden_states __UpperCAmelCase : int = self.norma(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase ) __UpperCAmelCase : Tuple = self.conva(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) ) __UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 ) __UpperCAmelCase : List[str] = hidden_states + temb __UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase ) __UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase ) __UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : List[str] = self.conva(__lowerCamelCase ) if self.conv_shortcut is not None: __UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase ) return hidden_states + residual
342
1
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( _lowercase ): lowerCamelCase__: List[Any] = ["image_processor", "tokenizer"] lowerCamelCase__: List[str] = "AutoImageProcessor" lowerCamelCase__: List[Any] = "AutoTokenizer" def __init__( self: int , __lowerCamelCase: List[Any] , __lowerCamelCase: int ) -> Optional[Any]: super().__init__(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.image_processor def __call__( self: List[str] , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: Union[str, Any]=None , __lowerCamelCase: Union[str, Any]=None , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]: if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: __UpperCAmelCase : Optional[int] = self.tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) if images is not None: __UpperCAmelCase : str = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) if text is not None and images is not None: __UpperCAmelCase : Tuple = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCamelCase ) , tensor_type=__lowerCamelCase ) def _lowerCamelCase ( self: Any , *__lowerCamelCase: str , **__lowerCamelCase: List[Any] ) -> Optional[Any]: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self: Optional[int] , *__lowerCamelCase: Optional[Any] , **__lowerCamelCase: List[str] ) -> Optional[int]: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) @property def _lowerCamelCase ( self: str ) -> int: return ["input_ids", "attention_mask", "pixel_values"]
342
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss _snake_case = pytest.mark.integration @require_faiss class _snake_case ( _lowercase ): def _lowerCamelCase ( self: Union[str, Any] ) -> str: __UpperCAmelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(__lowerCamelCase ) for x in np.arange(30 ).tolist()]} ) return dset def _lowerCamelCase ( self: Optional[Any] ) -> Tuple: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() __UpperCAmelCase : int = dset.map( lambda __lowerCamelCase , __lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase ) __UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT ) __UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) dset.drop_index("vecs" ) def _lowerCamelCase ( self: List[str] ) -> int: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def _lowerCamelCase ( self: Optional[int] ) -> Dict: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file: dset.save_faiss_index("vecs" , tmp_file.name ) dset.load_faiss_index("vecs2" , tmp_file.name ) os.unlink(tmp_file.name ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def _lowerCamelCase ( self: List[Any] ) -> List[Any]: __UpperCAmelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" ) dset.drop_index("vecs" ) self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) ) def _lowerCamelCase ( self: List[str] ) -> Dict: from elasticsearch import Elasticsearch __UpperCAmelCase : Dataset = self._create_dummy_dataset() with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: __UpperCAmelCase : int = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30 ) __UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} __UpperCAmelCase : Any = Elasticsearch() dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) @require_faiss class _snake_case ( _lowercase ): def _lowerCamelCase ( self: List[str] ) -> Optional[int]: import faiss __UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa ) __UpperCAmelCase : List[str] = 1 __UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase ) self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1] __UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase ) self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] ) __UpperCAmelCase : Dict = [scores[0] for scores in total_scores] __UpperCAmelCase : int = [indices[0] for indices in total_indices] self.assertGreater(np.min(__lowerCamelCase ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase ) def _lowerCamelCase ( self: Any ) -> List[str]: import faiss __UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(__lowerCamelCase ): __UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) ) def 
_lowerCamelCase ( self: List[str] ) -> Dict: import faiss __UpperCAmelCase : str = faiss.IndexFlat(5 ) __UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def _lowerCamelCase ( self: Union[str, Any] ) -> int: import faiss __UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file: index.save(tmp_file.name ) __UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa ) __UpperCAmelCase : Tuple = 1 __UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def _UpperCamelCase ( snake_case__ ) -> Optional[Any]: import faiss __UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5, dtype=np.floataa ) ) __UpperCAmelCase : Optional[Any] = "index.faiss" __UpperCAmelCase : Optional[int] = f'''mock://{index_name}''' index.save(snake_case__, storage_options=mockfs.storage_options ) __UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options ) __UpperCAmelCase : str = np.zeros(5, dtype=np.floataa ) __UpperCAmelCase : Any = 1 __UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _snake_case ( _lowercase ): def _lowerCamelCase ( self: str ) -> Union[str, Any]: from elasticsearch import Elasticsearch with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: __UpperCAmelCase : Optional[Any] = Elasticsearch() __UpperCAmelCase : Dict = {"acknowledged": True} __UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(["foo", "bar", "foobar"] ) # single query __UpperCAmelCase : Dict = "foo" __UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} __UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __UpperCAmelCase : int = "foo" __UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __UpperCAmelCase : int = ["foo", "bar", "foobar"] __UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} __UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase ) __UpperCAmelCase : Tuple = [scores[0] for scores in total_scores] __UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices] 
self.assertGreater(np.min(__lowerCamelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __lowerCamelCase ) # batched queries with timeout __UpperCAmelCase : str = ["foo", "bar", "foobar"] __UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 ) __UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores] __UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(__lowerCamelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __lowerCamelCase )
342
1
from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class _snake_case : lowerCamelCase__: int lowerCamelCase__: Node | None = None lowerCamelCase__: Node | None = None def _UpperCamelCase ( ) -> Node | None: __UpperCAmelCase : Union[str, Any] = Node(1 ) __UpperCAmelCase : Dict = Node(2 ) __UpperCAmelCase : str = Node(3 ) __UpperCAmelCase : Optional[int] = Node(4 ) __UpperCAmelCase : str = Node(5 ) return tree def _UpperCamelCase ( snake_case__ ) -> list[int]: return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def _UpperCamelCase ( snake_case__ ) -> list[int]: return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def _UpperCamelCase ( snake_case__ ) -> list[int]: return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def _UpperCamelCase ( snake_case__ ) -> int: return (max(height(root.left ), height(root.right ) ) + 1) if root else 0 def _UpperCamelCase ( snake_case__ ) -> Sequence[Node | None]: __UpperCAmelCase : list[Any] = [] if root is None: return output __UpperCAmelCase : Optional[int] = deque([root] ) while process_queue: __UpperCAmelCase : str = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def _UpperCamelCase ( snake_case__, snake_case__ ) -> Sequence[Node | None]: __UpperCAmelCase : list[Any] = [] def populate_output(snake_case__, snake_case__ ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left, level - 1 ) populate_output(root.right, level - 1 ) populate_output(snake_case__, snake_case__ ) return output def _UpperCamelCase ( snake_case__, snake_case__ ) -> Sequence[Node | None]: __UpperCAmelCase : list[Any] = [] def populate_output(snake_case__, snake_case__ ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right, level - 1 ) populate_output(root.left, level - 1 ) populate_output(snake_case__, snake_case__ ) return output def _UpperCamelCase ( snake_case__ ) -> Sequence[Node | None] | list[Any]: if root is None: return [] __UpperCAmelCase : list[Sequence[Node | None]] = [] __UpperCAmelCase : Union[str, Any] = 0 __UpperCAmelCase : str = height(snake_case__ ) for h in range(1, height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(snake_case__, snake_case__ ) ) __UpperCAmelCase : Dict = 1 else: output.append(get_nodes_from_right_to_left(snake_case__, snake_case__ ) ) __UpperCAmelCase : Optional[int] = 0 return output def _UpperCamelCase ( ) -> None: # Main function for testing. __UpperCAmelCase : Tuple = make_tree() print(f'''In-order Traversal: {inorder(snake_case__ )}''' ) print(f'''Pre-order Traversal: {preorder(snake_case__ )}''' ) print(f'''Post-order Traversal: {postorder(snake_case__ )}''', "\n" ) print(f'''Height of Tree: {height(snake_case__ )}''', "\n" ) print("Complete Level Order Traversal: " ) print(level_order(snake_case__ ), "\n" ) print("Level-wise order Traversal: " ) for level in range(1, height(snake_case__ ) + 1 ): print(f'''Level {level}:''', get_nodes_from_left_to_right(snake_case__, level=snake_case__ ) ) print("\nZigZag order Traversal: " ) print(zigzag(snake_case__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
342
import argparse import struct import unittest class _snake_case : def __init__( self: Tuple , __lowerCamelCase: bytes ) -> None: __UpperCAmelCase : Tuple = data # Initialize hash values __UpperCAmelCase : Any = [ 0x6_A_0_9_E_6_6_7, 0xB_B_6_7_A_E_8_5, 0x3_C_6_E_F_3_7_2, 0xA_5_4_F_F_5_3_A, 0x5_1_0_E_5_2_7_F, 0x9_B_0_5_6_8_8_C, 0x1_F_8_3_D_9_A_B, 0x5_B_E_0_C_D_1_9, ] # Initialize round constants __UpperCAmelCase : Dict = [ 0x4_2_8_A_2_F_9_8, 0x7_1_3_7_4_4_9_1, 0xB_5_C_0_F_B_C_F, 0xE_9_B_5_D_B_A_5, 0x3_9_5_6_C_2_5_B, 0x5_9_F_1_1_1_F_1, 0x9_2_3_F_8_2_A_4, 0xA_B_1_C_5_E_D_5, 0xD_8_0_7_A_A_9_8, 0x1_2_8_3_5_B_0_1, 0x2_4_3_1_8_5_B_E, 0x5_5_0_C_7_D_C_3, 0x7_2_B_E_5_D_7_4, 0x8_0_D_E_B_1_F_E, 0x9_B_D_C_0_6_A_7, 0xC_1_9_B_F_1_7_4, 0xE_4_9_B_6_9_C_1, 0xE_F_B_E_4_7_8_6, 0x0_F_C_1_9_D_C_6, 0x2_4_0_C_A_1_C_C, 0x2_D_E_9_2_C_6_F, 0x4_A_7_4_8_4_A_A, 0x5_C_B_0_A_9_D_C, 0x7_6_F_9_8_8_D_A, 0x9_8_3_E_5_1_5_2, 0xA_8_3_1_C_6_6_D, 0xB_0_0_3_2_7_C_8, 0xB_F_5_9_7_F_C_7, 0xC_6_E_0_0_B_F_3, 0xD_5_A_7_9_1_4_7, 0x0_6_C_A_6_3_5_1, 0x1_4_2_9_2_9_6_7, 0x2_7_B_7_0_A_8_5, 0x2_E_1_B_2_1_3_8, 0x4_D_2_C_6_D_F_C, 0x5_3_3_8_0_D_1_3, 0x6_5_0_A_7_3_5_4, 0x7_6_6_A_0_A_B_B, 0x8_1_C_2_C_9_2_E, 0x9_2_7_2_2_C_8_5, 0xA_2_B_F_E_8_A_1, 0xA_8_1_A_6_6_4_B, 0xC_2_4_B_8_B_7_0, 0xC_7_6_C_5_1_A_3, 0xD_1_9_2_E_8_1_9, 0xD_6_9_9_0_6_2_4, 0xF_4_0_E_3_5_8_5, 0x1_0_6_A_A_0_7_0, 0x1_9_A_4_C_1_1_6, 0x1_E_3_7_6_C_0_8, 0x2_7_4_8_7_7_4_C, 0x3_4_B_0_B_C_B_5, 0x3_9_1_C_0_C_B_3, 0x4_E_D_8_A_A_4_A, 0x5_B_9_C_C_A_4_F, 0x6_8_2_E_6_F_F_3, 0x7_4_8_F_8_2_E_E, 0x7_8_A_5_6_3_6_F, 0x8_4_C_8_7_8_1_4, 0x8_C_C_7_0_2_0_8, 0x9_0_B_E_F_F_F_A, 0xA_4_5_0_6_C_E_B, 0xB_E_F_9_A_3_F_7, 0xC_6_7_1_7_8_F_2, ] __UpperCAmelCase : List[Any] = self.preprocessing(self.data ) self.final_hash() @staticmethod def _lowerCamelCase ( __lowerCamelCase: bytes ) -> bytes: __UpperCAmelCase : List[str] = B"\x80" + (B"\x00" * (63 - (len(__lowerCamelCase ) + 8) % 64)) __UpperCAmelCase : int = struct.pack(">Q" , (len(__lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def _lowerCamelCase ( self: Dict ) -> None: # Convert into blocks of 64 bytes __UpperCAmelCase : Dict = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __UpperCAmelCase : List[str] = list(struct.unpack(">16L" , __lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array __UpperCAmelCase : Union[str, Any] = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) __UpperCAmelCase : str = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) __UpperCAmelCase : Union[str, Any] = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_0_0_0_0_0_0_0_0 # Compression __UpperCAmelCase : Union[str, Any] = self.ror(__lowerCamelCase , 6 ) ^ self.ror(__lowerCamelCase , 11 ) ^ self.ror(__lowerCamelCase , 25 ) __UpperCAmelCase : Tuple = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g) __UpperCAmelCase : int = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_0_0_0_0_0_0_0_0 __UpperCAmelCase : List[Any] = self.ror(__lowerCamelCase , 2 ) ^ self.ror(__lowerCamelCase , 13 ) ^ self.ror(__lowerCamelCase , 22 ) __UpperCAmelCase : Dict = (a & b) ^ (a & c) 
^ (b & c) __UpperCAmelCase : int = (sa + maj) % 0x1_0_0_0_0_0_0_0_0 __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = ( g, f, e, ((d + tempa) % 0x1_0_0_0_0_0_0_0_0), c, b, a, ((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0), ) __UpperCAmelCase : Optional[int] = [a, b, c, d, e, f, g, h] # Modify final values __UpperCAmelCase : List[str] = [ ((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0) for index, element in enumerate(self.hashes ) ] __UpperCAmelCase : int = "".join([hex(__lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> int: return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations) class _snake_case ( unittest.TestCase ): def _lowerCamelCase ( self: List[Any] ) -> None: import hashlib __UpperCAmelCase : Dict = bytes("Test String" , "utf-8" ) self.assertEqual(SHAaaa(__lowerCamelCase ).hash , hashlib.shaaaa(__lowerCamelCase ).hexdigest() ) def _UpperCamelCase ( ) -> None: import doctest doctest.testmod() __UpperCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument( "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", ) parser.add_argument( "-f", "--file", dest="input_file", help="Hash contents of a file" ) __UpperCAmelCase : List[Any] = parser.parse_args() __UpperCAmelCase : Optional[int] = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file, "rb" ) as f: __UpperCAmelCase : List[str] = f.read() else: __UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" ) print(SHAaaa(snake_case__ ).hash ) if __name__ == "__main__": main()
342
1
import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _snake_case = logging.get_logger(__name__) _snake_case = { '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''', # See all DETR models at https://huggingface.co/models?filter=detr } class _snake_case ( _lowercase ): lowerCamelCase__: str = "detr" lowerCamelCase__: Dict = ["past_key_values"] lowerCamelCase__: str = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) __UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): __UpperCAmelCase : List[Any] = backbone_config.get("model_type" ) __UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type] __UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase ) # set timm attributes to None __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None __UpperCAmelCase : Any = use_timm_backbone __UpperCAmelCase : Optional[Any] = backbone_config __UpperCAmelCase : Optional[Any] = num_channels __UpperCAmelCase : List[Any] = num_queries __UpperCAmelCase : Optional[int] = d_model __UpperCAmelCase : Optional[Any] = encoder_ffn_dim __UpperCAmelCase : Dict = encoder_layers __UpperCAmelCase : List[Any] = encoder_attention_heads __UpperCAmelCase : int = decoder_ffn_dim __UpperCAmelCase : Tuple = decoder_layers __UpperCAmelCase : int = decoder_attention_heads __UpperCAmelCase : List[Any] = dropout __UpperCAmelCase : Dict = attention_dropout __UpperCAmelCase : Optional[Any] = activation_dropout __UpperCAmelCase : int = activation_function __UpperCAmelCase : Any = init_std __UpperCAmelCase : str = init_xavier_std __UpperCAmelCase : int = encoder_layerdrop __UpperCAmelCase : Tuple = decoder_layerdrop __UpperCAmelCase : List[Any] = encoder_layers __UpperCAmelCase : Optional[Any] = auxiliary_loss __UpperCAmelCase : int = position_embedding_type __UpperCAmelCase : Optional[int] = backbone __UpperCAmelCase : str = use_pretrained_backbone __UpperCAmelCase : Dict = dilation # Hungarian matcher __UpperCAmelCase : Optional[int] = class_cost __UpperCAmelCase : Optional[Any] = bbox_cost __UpperCAmelCase : Optional[int] = giou_cost # Loss coefficients __UpperCAmelCase : Any = mask_loss_coefficient __UpperCAmelCase : Any = dice_loss_coefficient __UpperCAmelCase : Any = bbox_loss_coefficient __UpperCAmelCase : Optional[int] = giou_loss_coefficient __UpperCAmelCase : Optional[Any] = eos_coefficient super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase ) @property def _lowerCamelCase ( self: Dict ) -> int: return self.encoder_attention_heads @property def _lowerCamelCase ( self: str ) -> int: return self.d_model @classmethod def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]: return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self: str ) -> Dict[str, any]: __UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: __UpperCAmelCase : int = self.backbone_config.to_dict() __UpperCAmelCase : List[str] = self.__class__.model_type return output class _snake_case ( _lowercase ): lowerCamelCase__: Optional[int] = version.parse("1.11" ) @property def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _lowerCamelCase ( self: Optional[Any] ) -> float: return 1e-5 @property def _lowerCamelCase ( self: List[str] ) -> int: return 12
342
import numpy as np import datasets _snake_case = ''' Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] ''' _snake_case = '''\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } ''' _snake_case = ''' Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric("mahalanobis") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {\'mahalanobis\': array([0.5])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _snake_case ( datasets.Metric ): def _lowerCamelCase ( self: List[str] ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]: # convert to numpy arrays __UpperCAmelCase : int = np.array(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction __UpperCAmelCase : str = X - np.mean(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = np.cov(reference_distribution.T ) try: __UpperCAmelCase : int = np.linalg.inv(__lowerCamelCase ) except np.linalg.LinAlgError: __UpperCAmelCase : Optional[int] = np.linalg.pinv(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = np.dot(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
342
1
from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ = False ) -> list[float]: if radian_mode: return [magnitude * cos(snake_case__ ), magnitude * sin(snake_case__ )] return [magnitude * cos(radians(snake_case__ ) ), magnitude * sin(radians(snake_case__ ) )] def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ = 10**-1 ) -> bool: __UpperCAmelCase : NDArray[floataa] = cross(snake_case__, snake_case__ ) __UpperCAmelCase : float = sum(snake_case__ ) return abs(snake_case__ ) < eps if __name__ == "__main__": # Test to check if it works _snake_case = array( [ polar_force(7_1_8.4, 180 - 30), polar_force(8_7_9.5_4, 45), polar_force(100, -90), ] ) _snake_case = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg _snake_case = array( [ polar_force(30 * 9.8_1, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) _snake_case = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg _snake_case = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) _snake_case = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
342
import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class _snake_case ( unittest.TestCase ): def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str: __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : Optional[int] = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : List[str] = use_attention_mask __UpperCAmelCase : Dict = use_token_type_ids __UpperCAmelCase : Optional[int] = use_labels __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Dict = num_hidden_layers __UpperCAmelCase : Dict = num_attention_heads __UpperCAmelCase : Tuple = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_act __UpperCAmelCase : Tuple = hidden_dropout_prob __UpperCAmelCase : str = attention_probs_dropout_prob __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : Optional[int] = type_vocab_size __UpperCAmelCase : str = type_sequence_label_size __UpperCAmelCase : Tuple = initializer_range __UpperCAmelCase : str = num_choices def _lowerCamelCase ( self: Optional[Any] ) -> List[str]: __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : str = None if self.use_attention_mask: __UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Any = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , ) return config, input_ids, attention_mask def _lowerCamelCase ( self: str ) -> Any: __UpperCAmelCase : List[str] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs __UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class _snake_case ( _lowercase , unittest.TestCase ): lowerCamelCase__: str = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, 
FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCamelCase ( self: List[Any] ) -> Dict: __UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self ) @slow def _lowerCamelCase ( self: Tuple ) -> Optional[Any]: for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" ) __UpperCAmelCase : Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCamelCase ) @require_flax class _snake_case ( unittest.TestCase ): @slow def _lowerCamelCase ( self: int ) -> List[Any]: __UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" ) __UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) __UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0] __UpperCAmelCase : str = (1, 11, 7_68) self.assertEqual(output.shape , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
342
1
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput _snake_case = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _snake_case ( _lowercase ): def __init__( self: Dict , *__lowerCamelCase: int , __lowerCamelCase: List[str]=None , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=None , **__lowerCamelCase: Dict ) -> List[Any]: super().__init__(*__lowerCamelCase , **__lowerCamelCase ) __UpperCAmelCase : Any = eval_examples __UpperCAmelCase : Optional[Any] = post_process_function __UpperCAmelCase : int = quant_trainer_args __UpperCAmelCase : Tuple = 1_28 # default number of calibration samples def _lowerCamelCase ( self: str , __lowerCamelCase: Union[str, Any]=None ) -> List[str]: if calib_dataset is None and self.calib_dataset is None: raise ValueError("Trainer: calibration requires an calib_dataset." ) __UpperCAmelCase : Dict = calib_dataset if calib_dataset is not None else self.calib_dataset __UpperCAmelCase : Optional[int] = self._remove_unused_columns(__lowerCamelCase , description="Calibration" ) return DataLoader( __lowerCamelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__lowerCamelCase , ) def _lowerCamelCase ( self: int , __lowerCamelCase: int=None ) -> Optional[Any]: __UpperCAmelCase : str = self.train_dataset if calib_dataset is None else calib_dataset __UpperCAmelCase : Dict = self.get_calib_dataloader(__lowerCamelCase ) __UpperCAmelCase : List[str] = self.model quant_trainer.configure_model(__lowerCamelCase , self.quant_trainer_args , calib=__lowerCamelCase ) model.eval() quant_trainer.enable_calibration(__lowerCamelCase ) logger.info("***** Running calibration *****" ) logger.info(f''' Num examples = {self.calib_num}''' ) logger.info(f''' Batch size = {calib_dataloader.batch_size}''' ) for step, inputs in enumerate(__lowerCamelCase ): # Prediction step __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.prediction_step(__lowerCamelCase , __lowerCamelCase , prediction_loss_only=__lowerCamelCase ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(__lowerCamelCase , self.quant_trainer_args ) __UpperCAmelCase : str = model def _lowerCamelCase ( self: Any , __lowerCamelCase: List[str]=None , __lowerCamelCase: List[str]=None , __lowerCamelCase: Any=None , __lowerCamelCase: str = "eval" ) -> Union[str, Any]: __UpperCAmelCase : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset __UpperCAmelCase : List[Any] = self.get_eval_dataloader(__lowerCamelCase ) __UpperCAmelCase : List[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
__UpperCAmelCase : Optional[Any] = self.compute_metrics __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __UpperCAmelCase : List[str] = eval_loop( __lowerCamelCase , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCamelCase , ) finally: __UpperCAmelCase : Any = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: __UpperCAmelCase : int = self.post_process_function(__lowerCamelCase , __lowerCamelCase , output.predictions ) __UpperCAmelCase : str = self.compute_metrics(__lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): __UpperCAmelCase : Any = metrics.pop(__lowerCamelCase ) self.log(__lowerCamelCase ) else: __UpperCAmelCase : Dict = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __UpperCAmelCase : List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCamelCase ) return metrics def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: Any=None , __lowerCamelCase: str = "test" ) -> str: __UpperCAmelCase : List[str] = self.get_test_dataloader(__lowerCamelCase ) # Temporarily disable metric computation, we will do it in the loop here. __UpperCAmelCase : Optional[int] = self.compute_metrics __UpperCAmelCase : str = None __UpperCAmelCase : Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __UpperCAmelCase : List[str] = eval_loop( __lowerCamelCase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCamelCase , ) finally: __UpperCAmelCase : Any = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output __UpperCAmelCase : Optional[int] = self.post_process_function(__lowerCamelCase , __lowerCamelCase , output.predictions , "predict" ) __UpperCAmelCase : Optional[Any] = self.compute_metrics(__lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): __UpperCAmelCase : Optional[Any] = metrics.pop(__lowerCamelCase ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCamelCase ) def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: int="./" ) -> int: __UpperCAmelCase : List[str] = self.eval_dataset __UpperCAmelCase : List[Any] = self.get_eval_dataloader(__lowerCamelCase ) __UpperCAmelCase : Tuple = next(iter(__lowerCamelCase ) ) # saving device - to make it consistent __UpperCAmelCase : List[str] = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) # convert to tuple __UpperCAmelCase : Dict = tuple(v.to(__lowerCamelCase ) for k, v in batch.items() ) logger.info("Converting model to be onnx compatible" ) from pytorch_quantization.nn import TensorQuantizer __UpperCAmelCase : int = True __UpperCAmelCase : Optional[int] = self.model.to(__lowerCamelCase ) model.eval() model.float() __UpperCAmelCase : Tuple = model.module if hasattr(__lowerCamelCase , "module" ) else model quant_trainer.configure_model(__lowerCamelCase , 
self.quant_trainer_args ) __UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , "model.onnx" ) logger.info(f'''exporting model to {output_model_file}''' ) __UpperCAmelCase : str = {0: "batch_size", 1: "seq_len"} torch.onnx.export( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , export_params=__lowerCamelCase , opset_version=13 , do_constant_folding=__lowerCamelCase , input_names=["input_ids", "attention_mask", "token_type_ids"] , output_names=["output_start_logits", "output_end_logits"] , dynamic_axes={ "input_ids": axes, "attention_mask": axes, "token_type_ids": axes, "output_start_logits": axes, "output_end_logits": axes, } , verbose=__lowerCamelCase , ) logger.info("onnx export finished" )
342
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration _snake_case = [ # tf -> hf ('''/''', '''.'''), ('''layer_''', '''layers.'''), ('''kernel''', '''weight'''), ('''beta''', '''bias'''), ('''gamma''', '''weight'''), ('''pegasus''', '''model'''), ] _snake_case = [ ('''.output.dense''', '''.fc2'''), ('''intermediate.LayerNorm''', '''final_layer_norm'''), ('''intermediate.dense''', '''fc1'''), ] _snake_case = ( INIT_COMMON + [ ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.out_proj'''), ('''attention.self''', '''self_attn'''), ('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''), ('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''), ('''attention.encdec''', '''encoder_attn'''), ('''key''', '''k_proj'''), ('''value''', '''v_proj'''), ('''query''', '''q_proj'''), ('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''), ] + END_COMMON ) _snake_case = ( INIT_COMMON + [ ('''embeddings.word_embeddings''', '''shared.weight'''), ('''embeddings.position_embeddings''', '''embed_positions.weight'''), ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.output'''), ('''attention.self''', '''self_attn.self'''), ('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''), ] + END_COMMON ) _snake_case = [ '''encdec/key/bias''', '''encdec/query/bias''', '''encdec/value/bias''', '''self/key/bias''', '''self/query/bias''', '''self/value/bias''', '''encdec_output/dense/bias''', '''attention/output/dense/bias''', ] def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any: for tf_name, hf_name in patterns: __UpperCAmelCase : Optional[int] = k.replace(snake_case__, snake_case__ ) return k def _UpperCamelCase ( snake_case__, snake_case__ ) -> BigBirdPegasusForConditionalGeneration: __UpperCAmelCase : Dict = BigBirdPegasusConfig(**snake_case__ ) __UpperCAmelCase : Dict = BigBirdPegasusForConditionalGeneration(snake_case__ ) __UpperCAmelCase : Optional[Any] = torch_model.state_dict() __UpperCAmelCase : Optional[int] = {} # separating decoder weights __UpperCAmelCase : List[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )} __UpperCAmelCase : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )} for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ): __UpperCAmelCase : Optional[int] = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE] if any(snake_case__ ): continue __UpperCAmelCase : List[str] = DECODER_PATTERNS __UpperCAmelCase : str = rename_state_dict_key(snake_case__, snake_case__ ) if new_k not in state_dict: raise ValueError(f'''could not find new key {new_k} in state dict. 
(converted from {k})''' ) if any(True if i in k else False for i in ["dense", "query", "key", "value"] ): __UpperCAmelCase : Optional[int] = v.T __UpperCAmelCase : str = torch.from_numpy(snake_case__ ) assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ): __UpperCAmelCase : int = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE] if any(snake_case__ ): continue __UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS __UpperCAmelCase : Optional[int] = rename_state_dict_key(snake_case__, snake_case__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' ) if any(True if i in k else False for i in ["dense", "query", "key", "value"] ): __UpperCAmelCase : List[Any] = v.T __UpperCAmelCase : List[str] = torch.from_numpy(snake_case__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' __UpperCAmelCase : List[Any] = mapping["model.embed_positions.weight"] __UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" ) __UpperCAmelCase , __UpperCAmelCase : Any = torch_model.load_state_dict(snake_case__, strict=snake_case__ ) __UpperCAmelCase : str = [ k for k in missing if k not in [ "final_logits_bias", "model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight", "lm_head.weight", ] ] assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], f'''no matches found for the following tf keys {extra}''' return torch_model def _UpperCamelCase ( snake_case__ ) -> Dict: __UpperCAmelCase : Tuple = tf.train.list_variables(snake_case__ ) __UpperCAmelCase : List[str] = {} __UpperCAmelCase : str = ["global_step"] for name, shape in tqdm(snake_case__, desc="converting tf checkpoint to dict" ): __UpperCAmelCase : Tuple = any(pat in name for pat in ignore_name ) if skip_key: continue __UpperCAmelCase : Optional[Any] = tf.train.load_variable(snake_case__, snake_case__ ) __UpperCAmelCase : Tuple = array return tf_weights def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict: __UpperCAmelCase : str = get_tf_weights_as_numpy(snake_case__ ) __UpperCAmelCase : List[Any] = convert_bigbird_pegasus(snake_case__, snake_case__ ) torch_model.save_pretrained(snake_case__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''') parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''') _snake_case = parser.parse_args() _snake_case = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
342
1
class _snake_case : def __init__( self: Dict ) -> Any: __UpperCAmelCase : List[Any] = {} def _lowerCamelCase ( self: int ) -> None: print(self.vertex ) for i in self.vertex: print(__lowerCamelCase , " -> " , " -> ".join([str(__lowerCamelCase ) for j in self.vertex[i]] ) ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> None: # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(__lowerCamelCase ) else: # else make a new vertex __UpperCAmelCase : str = [to_vertex] def _lowerCamelCase ( self: List[str] ) -> None: # visited array for storing already visited nodes __UpperCAmelCase : List[str] = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(__lowerCamelCase , __lowerCamelCase ) def _lowerCamelCase ( self: int , __lowerCamelCase: int , __lowerCamelCase: list ) -> None: # mark start vertex as visited __UpperCAmelCase : List[Any] = True print(__lowerCamelCase , end=" " ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": _snake_case = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print('''DFS:''') g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
342
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class _snake_case ( _lowercase ):
    lowerCamelCase__: Any = ["image_processor", "tokenizer"]
    lowerCamelCase__: Optional[Any] = "BlipImageProcessor"
    lowerCamelCase__: Optional[int] = "AutoTokenizer"

    def __init__( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict:
        super().__init__(__lowerCamelCase , __lowerCamelCase )
        # add QFormer tokenizer
        __UpperCAmelCase : Dict = qformer_tokenizer

    def __call__( self: Any , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Dict , ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text." )
        __UpperCAmelCase : str = BatchFeature()
        if text is not None:
            __UpperCAmelCase : Any = self.tokenizer(
                text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
            encoding.update(__lowerCamelCase )
            __UpperCAmelCase : Dict = self.qformer_tokenizer(
                text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
            __UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" )
            __UpperCAmelCase : Optional[int] = qformer_text_encoding.pop("attention_mask" )
        if images is not None:
            __UpperCAmelCase : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
            encoding.update(__lowerCamelCase )
        return encoding

    def _lowerCamelCase ( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Optional[Any]:
        return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )

    def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Any , **__lowerCamelCase: Dict ) -> Tuple:
        return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def _lowerCamelCase ( self: List[str] ) -> Tuple:
        __UpperCAmelCase : str = self.tokenizer.model_input_names
        __UpperCAmelCase : Dict = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> str:
        if os.path.isfile(__lowerCamelCase ):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
        __UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
        self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
        return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )

    @classmethod
    def _lowerCamelCase ( cls: Tuple , __lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]:
        __UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
        __UpperCAmelCase : List[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
        args.append(__lowerCamelCase )
        return cls(*__lowerCamelCase )
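For context, this processor appears to mirror `InstructBlipProcessor` from `transformers`, which bundles an image processor, a language tokenizer, and a separate Q-Former tokenizer. A minimal usage sketch follows; the checkpoint name and image path are placeholders, not taken from this file.

from PIL import Image
from transformers import InstructBlipProcessor

# assumes Hub access and a local image file
processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
image = Image.open("photo.jpg")
inputs = processor(images=image, text="What is shown in this image?", return_tensors="pt")
# inputs carries pixel_values, input_ids/attention_mask from the language tokenizer,
# and qformer_input_ids/qformer_attention_mask from the Q-Former tokenizer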
import numpy as np
import qiskit


def _UpperCamelCase ( snake_case__ = 8, snake_case__ = None ) -> str:
    __UpperCAmelCase : List[Any] = np.random.default_rng(seed=snake_case__ )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    __UpperCAmelCase : Tuple = 6 * key_len
    # Measurement basis for Alice's qubits.
    __UpperCAmelCase : Dict = rng.integers(2, size=snake_case__ )
    # The set of states Alice will prepare.
    __UpperCAmelCase : Union[str, Any] = rng.integers(2, size=snake_case__ )
    # Measurement basis for Bob's qubits.
    __UpperCAmelCase : int = rng.integers(2, size=snake_case__ )

    # Quantum Circuit to simulate BB84
    __UpperCAmelCase : Any = qiskit.QuantumCircuit(snake_case__, name="BB84" )

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(snake_case__ ):
        if alice_state[index] == 1:
            bbaa_circ.x(snake_case__ )
        if alice_basis[index] == 1:
            bbaa_circ.h(snake_case__ )
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(snake_case__ ):
        if bob_basis[index] == 1:
            bbaa_circ.h(snake_case__ )

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    __UpperCAmelCase : Tuple = qiskit.Aer.get_backend("aer_simulator" )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    __UpperCAmelCase : List[Any] = qiskit.execute(snake_case__, snake_case__, shots=1, seed_simulator=snake_case__ )
    # Returns the result of measurement.
    __UpperCAmelCase : Dict = job.result().get_counts(snake_case__ ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    __UpperCAmelCase : List[str] = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                snake_case__, snake_case__, snake_case__ )
            if alice_basis_bit == bob_basis_bit
        ] )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    __UpperCAmelCase : Tuple = gen_key[:key_len] if len(snake_case__ ) >= key_len else gen_key.ljust(snake_case__, "0" )
    return key


if __name__ == "__main__":
    print(F'The generated key is : {bbaa(8, seed=0)}')

    from doctest import testmod

    testmod()
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

_snake_case = logging.get_logger(__name__)

_snake_case = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

_snake_case = {
    '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
    '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
    '''tokenizer_config_file''': {
        '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
    },
}

_snake_case = {'''facebook/blenderbot-3B''': 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Dict:
    __UpperCAmelCase : Tuple = (
        list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
    )
    __UpperCAmelCase : str = bs[:]
    __UpperCAmelCase : Any = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(snake_case__ )
            cs.append(2**8 + n )
            n += 1
    __UpperCAmelCase : Optional[Any] = [chr(snake_case__ ) for n in cs]
    return dict(zip(snake_case__, snake_case__ ) )


def _UpperCamelCase ( snake_case__ ) -> Any:
    __UpperCAmelCase : List[Any] = set()
    __UpperCAmelCase : Any = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        __UpperCAmelCase : Union[str, Any] = char
    return pairs


class _snake_case ( _lowercase ):
    lowerCamelCase__: str = VOCAB_FILES_NAMES
    lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase__: Dict = ["input_ids", "attention_mask"]

    def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]:
        __UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
        __UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
        __UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
        __UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
        __UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
        __UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        __UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
        super().__init__(
            errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
        with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
            __UpperCAmelCase : List[Any] = json.load(__lowerCamelCase )
        __UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
        __UpperCAmelCase : Dict = errors  # how to handle errors in decoding
        __UpperCAmelCase : Optional[int] = bytes_to_unicode()
        __UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
        with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
            __UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1]
        __UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
        __UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
        __UpperCAmelCase : List[Any] = {}
        __UpperCAmelCase : Tuple = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        __UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def _lowerCamelCase ( self: Dict ) -> Any:
        return len(self.encoder )

    def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
        return dict(self.encoder , **self.added_tokens_encoder )

    def _lowerCamelCase ( self: int , __lowerCamelCase: List[Any] ) -> Union[str, Any]:
        if token in self.cache:
            return self.cache[token]
        __UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
        __UpperCAmelCase : Dict = get_pairs(__lowerCamelCase )
        if not pairs:
            return token
        while True:
            __UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram
            __UpperCAmelCase : Optional[int] = []
            __UpperCAmelCase : str = 0
            while i < len(__lowerCamelCase ):
                try:
                    __UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __UpperCAmelCase : Union[str, Any] = j
                if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
            __UpperCAmelCase : str = new_word
            if len(__lowerCamelCase ) == 1:
                break
            else:
                __UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase )
        __UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase )
        __UpperCAmelCase : Union[str, Any] = word
        return word

    def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict:
        __UpperCAmelCase : Any = []
        for token in re.findall(self.pat , __lowerCamelCase ):
            __UpperCAmelCase : int = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" )
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
        return bpe_tokens

    def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict:
        return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )

    def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]:
        return self.decoder.get(__lowerCamelCase )

    def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int:
        __UpperCAmelCase : Dict = "".join(__lowerCamelCase )
        __UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text

    def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(__lowerCamelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __UpperCAmelCase : Any = os.path.join(
            __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        __UpperCAmelCase : Dict = os.path.join(
            __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
        __UpperCAmelCase : Optional[Any] = 0
        with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    __UpperCAmelCase : Optional[Any] = token_index
                writer.write(" ".join(__lowerCamelCase ) + "\n" )
                index += 1
        return vocab_file, merge_file

    def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__lowerCamelCase )) + [1]
        return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]

    def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
        __UpperCAmelCase : int = [self.sep_token_id]
        __UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]:
        __UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
            __UpperCAmelCase : Optional[Any] = " " + text
        return (text, kwargs)

    def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]:
        return token_ids_a + [self.eos_token_id]

    def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]:
        __UpperCAmelCase : Tuple = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should contain them already.
                inputs.append(__lowerCamelCase )
        __UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase )
        __UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase )
        if len(__lowerCamelCase ) > self.model_max_length:
            __UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
            logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
        return input_ids
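If this file corresponds to `BlenderbotTokenizer` in `transformers` (an assumption based on the vocab URLs above), typical usage looks like this sketch:

from transformers import BlenderbotTokenizer

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
ids = tokenizer(" Hello, how are you?").input_ids
# build_inputs_with_special_tokens only appends </s>, so decoding
# round-trips the text plus the EOS marker
print(tokenizer.decode(ids))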
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def _UpperCamelCase ( snake_case__ ) -> List[str]:
    __UpperCAmelCase : Any = {}
    __UpperCAmelCase : Optional[int] = tokenizer(example["content"], truncation=snake_case__ )["input_ids"]
    __UpperCAmelCase : int = len(example["content"] ) / len(output["input_ids"] )
    return output


_snake_case = HfArgumentParser(PretokenizationArguments)
_snake_case = parser.parse_args()
if args.num_workers is None:
    _snake_case = multiprocessing.cpu_count()
_snake_case = AutoTokenizer.from_pretrained(args.tokenizer_dir)

_snake_case = time.time()
_snake_case = load_dataset(args.dataset_name, split='''train''')
print(F'Dataset loaded in {time.time()-t_start:.2f}s')

_snake_case = time.time()
_snake_case = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        '''repo_name''',
        '''path''',
        '''copies''',
        '''size''',
        '''content''',
        '''license''',
        '''hash''',
        '''line_mean''',
        '''line_max''',
        '''alpha_frac''',
        '''autogenerated''',
    ],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')

_snake_case = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
import json
import os
import shutil
import tempfile
import unittest

from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


class _snake_case ( _lowercase , unittest.TestCase ):
    lowerCamelCase__: List[Any] = CanineTokenizer
    lowerCamelCase__: Optional[int] = False

    def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
        super().setUp()
        __UpperCAmelCase : Tuple = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
        return CanineTokenizer.from_pretrained("google/canine-s" )

    def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer:
        __UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
        __UpperCAmelCase : Optional[int] = 10_24
        return tokenizer

    @require_torch
    def _lowerCamelCase ( self: List[str] ) -> int:
        __UpperCAmelCase : Union[str, Any] = self.canine_tokenizer
        __UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        __UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
        # fmt: on
        __UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
        self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
        __UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        self.assertEqual((2, 39) , batch.input_ids.shape )
        self.assertEqual((2, 39) , batch.attention_mask.shape )

    @require_torch
    def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
        __UpperCAmelCase : Optional[Any] = self.canine_tokenizer
        __UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        __UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids" , __lowerCamelCase )
        self.assertIn("attention_mask" , __lowerCamelCase )
        self.assertIn("token_type_ids" , __lowerCamelCase )

    @require_torch
    def _lowerCamelCase ( self: Any ) -> List[str]:
        __UpperCAmelCase : Optional[Any] = self.canine_tokenizer
        __UpperCAmelCase : int = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        __UpperCAmelCase : List[Any] = tokenizer(
            text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" )
        self.assertEqual(32 , targets["input_ids"].shape[1] )

    def _lowerCamelCase ( self: List[Any] ) -> Tuple:
        # safety check on max_len default value so we are sure the test works
        __UpperCAmelCase : Optional[int] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        __UpperCAmelCase : str = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                __UpperCAmelCase : int = tempfile.mkdtemp()
                __UpperCAmelCase : List[Any] = " He is very happy, UNwant\u00E9d,running"
                __UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                tokenizer.save_pretrained(__lowerCamelCase )
                __UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase )
                __UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
                shutil.rmtree(__lowerCamelCase )
        __UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                __UpperCAmelCase : List[Any] = tempfile.mkdtemp()
                __UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
                __UpperCAmelCase : str = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                __UpperCAmelCase : Tuple = chr(0xE_0_0_7 )
                additional_special_tokens.append(__lowerCamelCase )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                __UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                tokenizer.save_pretrained(__lowerCamelCase )
                __UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase )
                __UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
                self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                __UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(__lowerCamelCase )

    def _lowerCamelCase ( self: str ) -> Optional[int]:
        __UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_clean_sequence(__lowerCamelCase )
                # a special token for Canine can be defined as follows:
                __UpperCAmelCase : int = 0xE_0_0_5
                __UpperCAmelCase : Tuple = chr(__lowerCamelCase )
                tokenizer.add_special_tokens({"cls_token": special_token} )
                __UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                self.assertEqual(len(__lowerCamelCase ) , 1 )
                __UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase )
                __UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                __UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                __UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                self.assertEqual(__lowerCamelCase , input_encoded + special_token_id )
                __UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
                self.assertTrue(special_token not in decoded )

    def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
        __UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                __UpperCAmelCase : Optional[int] = chr(0xE_0_0_5 )
                __UpperCAmelCase : List[str] = chr(0xE_0_0_6 )
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase )
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
                __UpperCAmelCase : Tuple = tokenizer.tokenize(__lowerCamelCase )
                __UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase )
                self.assertEqual(len(__lowerCamelCase ) , 1 )
                self.assertEqual(len(__lowerCamelCase ) , 1 )
                self.assertEqual(token_a[0] , __lowerCamelCase )
                self.assertEqual(token_a[0] , __lowerCamelCase )

    @require_tokenizers
    def _lowerCamelCase ( self: str ) -> Union[str, Any]:
        __UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=__lowerCamelCase )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # a special token for Canine can be defined as follows:
                __UpperCAmelCase : Union[str, Any] = 0xE_0_0_6
                __UpperCAmelCase : int = chr(__lowerCamelCase )
                __UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(__lowerCamelCase )
                    tokenizer.from_pretrained(__lowerCamelCase )

    def _lowerCamelCase ( self: Dict ) -> List[str]:
        __UpperCAmelCase : str = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(__lowerCamelCase )
                with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    __UpperCAmelCase : Tuple = json.load(__lowerCamelCase )
                with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    __UpperCAmelCase : Optional[int] = json.load(__lowerCamelCase )
                # a special token for Canine can be defined as follows:
                __UpperCAmelCase : Any = 0xE_0_0_6
                __UpperCAmelCase : Union[str, Any] = chr(__lowerCamelCase )
                __UpperCAmelCase : Dict = [new_token_a]
                __UpperCAmelCase : int = [new_token_a]
                with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(__lowerCamelCase , __lowerCamelCase )
                with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(__lowerCamelCase , __lowerCamelCase )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                __UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 )
                self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] ,
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
                __UpperCAmelCase : List[Any] = 0xE_0_0_7
                __UpperCAmelCase : List[Any] = chr(__lowerCamelCase )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                __UpperCAmelCase : str = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )]
                __UpperCAmelCase : Dict = tokenizer_class.from_pretrained(
                    __lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 )
                self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens )
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )

    @require_tokenizers
    def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
        __UpperCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                __UpperCAmelCase : int = "hello world"
                if self.space_between_special_tokens:
                    __UpperCAmelCase : Any = "[CLS] hello world [SEP]"
                else:
                    __UpperCAmelCase : Union[str, Any] = input
                __UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                __UpperCAmelCase : Any = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
                self.assertIn(__lowerCamelCase , [output, output.lower()] )

    def _lowerCamelCase ( self: Dict ) -> Any:
        __UpperCAmelCase : Any = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                __UpperCAmelCase : List[str] = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                __UpperCAmelCase : List[str] = "a"
                __UpperCAmelCase : Any = ord(__lowerCamelCase )
                for attr in attributes_list:
                    setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
                    self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
                    self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
                    setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
                    self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
                    self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
                setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] )
                self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] )
                self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] )
                __UpperCAmelCase : Tuple = 0xE_0_0_6
                __UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase )
                setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
                self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
                self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )

    def _lowerCamelCase ( self: str ) -> Union[str, Any]:
        pass

    def _lowerCamelCase ( self: Any ) -> Any:
        pass

    def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
        pass

    def _lowerCamelCase ( self: Optional[int] ) -> Any:
        pass

    def _lowerCamelCase ( self: List[str] ) -> str:
        pass

    def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
        pass

    def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
        pass

    def _lowerCamelCase ( self: str ) -> Tuple:
        pass
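The tokenizer under test is character-level; assuming it is the public `CanineTokenizer`, the padded ids checked in the first test can be reproduced directly:

from transformers import CanineTokenizer

tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
batch = tokenizer(
    ["Life is like a box of chocolates.", "You never know what you're gonna get."],
    padding=True,
    return_tensors="pt",
)
# CANINE tokenizes to Unicode code points: 57344/57345 are the private-use
# [CLS]/[SEP] ids, followed by ord() values of each character and 0-padding
print(batch.input_ids[0][:6])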
import inspect
import unittest

from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
    from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class _snake_case :
    def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]:
        __UpperCAmelCase : Union[str, Any] = parent
        __UpperCAmelCase : List[str] = batch_size
        __UpperCAmelCase : Optional[int] = image_size
        __UpperCAmelCase : List[str] = num_channels
        __UpperCAmelCase : Union[str, Any] = num_stages
        __UpperCAmelCase : List[str] = hidden_sizes
        __UpperCAmelCase : Any = depths
        __UpperCAmelCase : Optional[int] = is_training
        __UpperCAmelCase : List[Any] = use_labels
        __UpperCAmelCase : Optional[int] = intermediate_size
        __UpperCAmelCase : Optional[Any] = hidden_act
        __UpperCAmelCase : Union[str, Any] = num_labels
        __UpperCAmelCase : Any = initializer_range
        __UpperCAmelCase : List[str] = out_features
        __UpperCAmelCase : Tuple = out_indices
        __UpperCAmelCase : List[Any] = scope

    def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
        __UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __UpperCAmelCase : List[str] = None
        if self.use_labels:
            __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
        __UpperCAmelCase : Optional[Any] = self.get_config()
        return config, pixel_values, labels

    def _lowerCamelCase ( self: Tuple ) -> List[Any]:
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
        __UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        __UpperCAmelCase : List[str] = model(__lowerCamelCase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple:
        __UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        __UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
        __UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        __UpperCAmelCase : Any = model(__lowerCamelCase )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        __UpperCAmelCase : List[Any] = None
        __UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        __UpperCAmelCase : Any = model(__lowerCamelCase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def _lowerCamelCase ( self: int ) -> List[str]:
        __UpperCAmelCase : int = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
        __UpperCAmelCase : str = {"pixel_values": pixel_values}
        return config, inputs_dict

    def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
        __UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
        __UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
    lowerCamelCase__: Dict = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    lowerCamelCase__: str = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    lowerCamelCase__: Tuple = False
    lowerCamelCase__: int = False
    lowerCamelCase__: Dict = False
    lowerCamelCase__: int = False
    lowerCamelCase__: Any = False

    def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
        __UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self )
        __UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )

    def _lowerCamelCase ( self: Dict ) -> Tuple:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _lowerCamelCase ( self: List[Any] ) -> int:
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
    def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
    def _lowerCamelCase ( self: Any ) -> Any:
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
    def _lowerCamelCase ( self: str ) -> Optional[Any]:
        pass

    def _lowerCamelCase ( self: List[Any] ) -> int:
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            __UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
            __UpperCAmelCase : Optional[Any] = True
            if model_class.__name__ in [
                *get_values(__lowerCamelCase ),
                *get_values(__lowerCamelCase ),
            ]:
                continue
            __UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
            model.to(__lowerCamelCase )
            model.train()
            __UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
            __UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
            loss.backward()

    def _lowerCamelCase ( self: Optional[int] ) -> Dict:
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
            __UpperCAmelCase : List[str] = False
            __UpperCAmelCase : int = True
            if (
                model_class.__name__ in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            __UpperCAmelCase : int = model_class(__lowerCamelCase )
            model.to(__lowerCamelCase )
            model.gradient_checkpointing_enable()
            model.train()
            __UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
            __UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
            loss.backward()

    def _lowerCamelCase ( self: List[str] ) -> Dict:
        __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCAmelCase : str = model_class(__lowerCamelCase )
            __UpperCAmelCase : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
            __UpperCAmelCase : int = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __lowerCamelCase )

    def _lowerCamelCase ( self: str ) -> List[Any]:
        __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCamelCase )

    def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
        def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ):
            __UpperCAmelCase : Any = model_class(__lowerCamelCase )
            model.to(__lowerCamelCase )
            model.eval()
            with torch.no_grad():
                __UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
            __UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __UpperCAmelCase : Optional[int] = self.model_tester.num_stages
            self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCAmelCase : Optional[int] = True
            check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __UpperCAmelCase : Any = True
            check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )

    def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
        __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )

    @slow
    def _lowerCamelCase ( self: Dict ) -> List[Any]:
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )


def _UpperCamelCase ( ) -> List[Any]:
    __UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
    @cached_property
    def _lowerCamelCase ( self: Optional[int] ) -> Dict:
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None

    @slow
    def _lowerCamelCase ( self: List[Any] ) -> Tuple:
        __UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase )
        __UpperCAmelCase : List[str] = self.default_image_processor
        __UpperCAmelCase : Optional[Any] = prepare_img()
        __UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
        # forward pass
        with torch.no_grad():
            __UpperCAmelCase : str = model(**__lowerCamelCase )
        # verify the logits
        __UpperCAmelCase : Dict = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , __lowerCamelCase )
        __UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
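Assuming these tests target the public `ConvNextV2ForImageClassification`, the integration check above corresponds to this inference sketch (the image path is a placeholder):

import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
image = Image.open("cat.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])  # top-1 ImageNet label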
import logging
import os

from .state import PartialState


class _snake_case ( logging.LoggerAdapter ):
    @staticmethod
    def _lowerCamelCase ( __lowerCamelCase: Any ) -> int:
        __UpperCAmelCase : str = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: List[Any] ) -> Optional[int]:
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
        __UpperCAmelCase : Any = kwargs.pop("main_process_only" , __lowerCamelCase )
        __UpperCAmelCase : Union[str, Any] = kwargs.pop("in_order" , __lowerCamelCase )
        if self.isEnabledFor(__lowerCamelCase ):
            if self._should_log(__lowerCamelCase ):
                __UpperCAmelCase , __UpperCAmelCase : int = self.process(__lowerCamelCase , __lowerCamelCase )
                self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
            elif in_order:
                __UpperCAmelCase : Optional[int] = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        __UpperCAmelCase , __UpperCAmelCase : List[Any] = self.process(__lowerCamelCase , __lowerCamelCase )
                        self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
                    state.wait_for_everyone()


def _UpperCamelCase ( snake_case__, snake_case__ = None ) -> List[str]:
    if log_level is None:
        __UpperCAmelCase : List[Any] = os.environ.get("ACCELERATE_LOG_LEVEL", snake_case__ )
    __UpperCAmelCase : Union[str, Any] = logging.getLogger(snake_case__ )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(snake_case__, {} )
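This adapter is what `accelerate.logging.get_logger` returns; the following is a minimal usage sketch, assuming a recent `accelerate` release where the `main_process_only` and `in_order` keywords exist on log calls:

from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes the shared PartialState the adapter checks
logger = get_logger(__name__, log_level="INFO")
logger.info("logged once, from the main process only")
logger.info("logged by every process, in rank order", main_process_only=False, in_order=True)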
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


_snake_case = logging.get_logger(__name__)

_snake_case = {
    '''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class _snake_case ( _lowercase ):
    lowerCamelCase__: Optional[Any] = "poolformer"

    def __init__( self: List[Any] , __lowerCamelCase: Optional[Any]=3 , __lowerCamelCase: List[Any]=16 , __lowerCamelCase: Optional[int]=16 , __lowerCamelCase: Optional[Any]=3 , __lowerCamelCase: List[str]=4.0 , __lowerCamelCase: Union[str, Any]=[2, 2, 6, 2] , __lowerCamelCase: List[Any]=[64, 1_28, 3_20, 5_12] , __lowerCamelCase: Tuple=[7, 3, 3, 3] , __lowerCamelCase: Dict=[4, 2, 2, 2] , __lowerCamelCase: Any=[2, 1, 1, 1] , __lowerCamelCase: List[Any]=4 , __lowerCamelCase: List[Any]=0.0 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: List[str]=True , __lowerCamelCase: Union[str, Any]=1e-5 , __lowerCamelCase: Optional[Any]=0.02 , **__lowerCamelCase: Any , ) -> Optional[Any]:
        __UpperCAmelCase : Dict = num_channels
        __UpperCAmelCase : str = patch_size
        __UpperCAmelCase : int = stride
        __UpperCAmelCase : Dict = padding
        __UpperCAmelCase : List[Any] = pool_size
        __UpperCAmelCase : str = hidden_sizes
        __UpperCAmelCase : Optional[int] = mlp_ratio
        __UpperCAmelCase : Optional[Any] = depths
        __UpperCAmelCase : Any = patch_sizes
        __UpperCAmelCase : int = strides
        __UpperCAmelCase : List[str] = num_encoder_blocks
        __UpperCAmelCase : Optional[int] = drop_path_rate
        __UpperCAmelCase : Any = hidden_act
        __UpperCAmelCase : Dict = use_layer_scale
        __UpperCAmelCase : Tuple = layer_scale_init_value
        __UpperCAmelCase : Optional[int] = initializer_range
        super().__init__(**__lowerCamelCase )


class _snake_case ( _lowercase ):
    lowerCamelCase__: Any = version.parse("1.11" )

    @property
    def _lowerCamelCase ( self: int ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def _lowerCamelCase ( self: Dict ) -> float:
        return 2e-3
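Assuming this is the public `PoolFormerConfig`, the defaults above describe the s12 variant; a config can be built and handed to a model like so:

from transformers import PoolFormerConfig, PoolFormerModel

config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
model = PoolFormerModel(config)  # randomly initialized, s12-sized
print(config.num_encoder_blocks)  # 4 stages, matching hidden_sizes/depths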
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class _snake_case ( _lowercase ):
    def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
        super().__init__(
            __lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
        __UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
        __UpperCAmelCase : int = Text(
            cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )

    def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
        # Build iterable dataset
        if self.streaming:
            __UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            __UpperCAmelCase : Any = None
            __UpperCAmelCase : Any = None
            __UpperCAmelCase : Dict = None
            __UpperCAmelCase : str = None
            self.builder.download_and_prepare(
                download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
            __UpperCAmelCase : Dict = self.builder.as_dataset(
                split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
        return dataset
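This reader backs the packaged `text` loader in `datasets`; end users normally reach it through `load_dataset`. A usage sketch (the file path is a placeholder):

from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")
print(ds[0]["text"])  # one line of the input file per example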
from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings


@maybe_allow_in_graph
class _snake_case ( nn.Module ):
    def __init__( self: Any , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: str = "geglu" , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: str = "layer_norm" , __lowerCamelCase: bool = False , ) -> List[Any]:
        super().__init__()
        __UpperCAmelCase : List[Any] = only_cross_attention
        __UpperCAmelCase : Tuple = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        __UpperCAmelCase : List[str] = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
                f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            __UpperCAmelCase : Dict = AdaLayerNorm(__lowerCamelCase , __lowerCamelCase )
        elif self.use_ada_layer_norm_zero:
            __UpperCAmelCase : Optional[Any] = AdaLayerNormZero(__lowerCamelCase , __lowerCamelCase )
        else:
            __UpperCAmelCase : int = nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase )
        __UpperCAmelCase : List[Any] = Attention(
            query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , dropout=__lowerCamelCase , bias=__lowerCamelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__lowerCamelCase , )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            __UpperCAmelCase : Union[str, Any] = (
                AdaLayerNorm(__lowerCamelCase , __lowerCamelCase )
                if self.use_ada_layer_norm
                else nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase )
            )
            __UpperCAmelCase : Tuple = Attention(
                query_dim=__lowerCamelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__lowerCamelCase , dim_head=__lowerCamelCase , dropout=__lowerCamelCase , bias=__lowerCamelCase , upcast_attention=__lowerCamelCase , )  # is self-attn if encoder_hidden_states is none
        else:
            __UpperCAmelCase : Any = None
            __UpperCAmelCase : Tuple = None
        # 3. Feed-forward
        __UpperCAmelCase : Dict = nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase )
        __UpperCAmelCase : List[Any] = FeedForward(__lowerCamelCase , dropout=__lowerCamelCase , activation_fn=__lowerCamelCase , final_dropout=__lowerCamelCase )
        # let chunk size default to None
        __UpperCAmelCase : Any = None
        __UpperCAmelCase : str = 0

    def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[int] , __lowerCamelCase: int ) -> Optional[Any]:
        # Sets chunk feed-forward
        __UpperCAmelCase : Dict = chunk_size
        __UpperCAmelCase : List[Any] = dim

    def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: torch.FloatTensor , __lowerCamelCase: Optional[torch.FloatTensor] = None , __lowerCamelCase: Optional[torch.FloatTensor] = None , __lowerCamelCase: Optional[torch.FloatTensor] = None , __lowerCamelCase: Optional[torch.LongTensor] = None , __lowerCamelCase: Dict[str, Any] = None , __lowerCamelCase: Optional[torch.LongTensor] = None , ) -> Dict:
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            __UpperCAmelCase : Dict = self.norma(__lowerCamelCase , __lowerCamelCase )
        elif self.use_ada_layer_norm_zero:
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.norma(
                __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hidden_dtype=hidden_states.dtype )
        else:
            __UpperCAmelCase : Optional[Any] = self.norma(__lowerCamelCase )
        __UpperCAmelCase : Optional[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        __UpperCAmelCase : List[str] = self.attna(
            __lowerCamelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__lowerCamelCase , **__lowerCamelCase , )
        if self.use_ada_layer_norm_zero:
            __UpperCAmelCase : List[Any] = gate_msa.unsqueeze(1 ) * attn_output
        __UpperCAmelCase : Union[str, Any] = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attna is not None:
            __UpperCAmelCase : int = (
                self.norma(__lowerCamelCase , __lowerCamelCase ) if self.use_ada_layer_norm else self.norma(__lowerCamelCase )
            )
            __UpperCAmelCase : Optional[int] = self.attna(
                __lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=__lowerCamelCase , **__lowerCamelCase , )
            __UpperCAmelCase : Union[str, Any] = attn_output + hidden_states
        # 3. Feed-forward
        __UpperCAmelCase : int = self.norma(__lowerCamelCase )
        if self.use_ada_layer_norm_zero:
            __UpperCAmelCase : str = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
            __UpperCAmelCase : List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            __UpperCAmelCase : str = torch.cat(
                [self.ff(__lowerCamelCase ) for hid_slice in norm_hidden_states.chunk(__lowerCamelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
        else:
            __UpperCAmelCase : Dict = self.ff(__lowerCamelCase )
        if self.use_ada_layer_norm_zero:
            __UpperCAmelCase : str = gate_mlp.unsqueeze(1 ) * ff_output
        __UpperCAmelCase : Any = ff_output + hidden_states
        return hidden_states


class _snake_case ( nn.Module ):
    def __init__( self: str , __lowerCamelCase: int , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 4 , __lowerCamelCase: float = 0.0 , __lowerCamelCase: str = "geglu" , __lowerCamelCase: bool = False , ) -> int:
        super().__init__()
        __UpperCAmelCase : str = int(dim * mult )
        __UpperCAmelCase : Dict = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            __UpperCAmelCase : Optional[Any] = GELU(__lowerCamelCase , __lowerCamelCase )
        if activation_fn == "gelu-approximate":
            __UpperCAmelCase : Optional[int] = GELU(__lowerCamelCase , __lowerCamelCase , approximate="tanh" )
        elif activation_fn == "geglu":
            __UpperCAmelCase : Dict = GEGLU(__lowerCamelCase , __lowerCamelCase )
        elif activation_fn == "geglu-approximate":
            __UpperCAmelCase : int = ApproximateGELU(__lowerCamelCase , __lowerCamelCase )
        __UpperCAmelCase : Optional[int] = nn.ModuleList([] )
        # project in
        self.net.append(__lowerCamelCase )
        # project dropout
        self.net.append(nn.Dropout(__lowerCamelCase ) )
        # project out
        self.net.append(nn.Linear(__lowerCamelCase , __lowerCamelCase ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(__lowerCamelCase ) )

    def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Any ) -> Tuple:
        for module in self.net:
            __UpperCAmelCase : str = module(__lowerCamelCase )
        return hidden_states


class _snake_case ( nn.Module ):
    def __init__( self: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: str = "none" ) -> Tuple:
        super().__init__()
        __UpperCAmelCase : Union[str, Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase )
        __UpperCAmelCase : Optional[int] = approximate

    def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Optional[Any] ) -> Tuple:
        if gate.device.type != "mps":
            return F.gelu(__lowerCamelCase , approximate=self.approximate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )

    def _lowerCamelCase ( self: Dict , __lowerCamelCase: int ) -> str:
        __UpperCAmelCase : int = self.proj(__lowerCamelCase )
        __UpperCAmelCase : int = self.gelu(__lowerCamelCase )
        return hidden_states


class _snake_case ( nn.Module ):
    def __init__( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> List[Any]:
        super().__init__()
        __UpperCAmelCase : Union[str, Any] = nn.Linear(__lowerCamelCase , dim_out * 2 )

    def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> List[Any]:
        if gate.device.type != "mps":
            return F.gelu(__lowerCamelCase )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )

    def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[Any] ) -> int:
        __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.proj(__lowerCamelCase ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(__lowerCamelCase )


class _snake_case ( nn.Module ):
    def __init__( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> List[Any]:
        super().__init__()
        __UpperCAmelCase : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase )

    def _lowerCamelCase ( self: str , __lowerCamelCase: Any ) -> int:
        __UpperCAmelCase : Optional[int] = self.proj(__lowerCamelCase )
        return x * torch.sigmoid(1.7_02 * x )


class _snake_case ( nn.Module ):
    def __init__( self: Union[str, Any] , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Any:
        super().__init__()
        __UpperCAmelCase : List[str] = nn.Embedding(__lowerCamelCase , __lowerCamelCase )
        __UpperCAmelCase : Union[str, Any] = nn.SiLU()
        __UpperCAmelCase : Tuple = nn.Linear(__lowerCamelCase , embedding_dim * 2 )
        __UpperCAmelCase : Optional[Any] = nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase )

    def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[Any] ) -> int:
        __UpperCAmelCase : Any = self.linear(self.silu(self.emb(__lowerCamelCase ) ) )
        __UpperCAmelCase , __UpperCAmelCase : Any = torch.chunk(__lowerCamelCase , 2 )
        __UpperCAmelCase : Dict = self.norm(__lowerCamelCase ) * (1 + scale) + shift
        return x


class _snake_case ( nn.Module ):
    def __init__( self: str , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[Any] ) -> Dict:
        super().__init__()
        __UpperCAmelCase : List[str] = CombinedTimestepLabelEmbeddings(__lowerCamelCase , __lowerCamelCase )
        __UpperCAmelCase : List[str] = nn.SiLU()
        __UpperCAmelCase : List[str] = nn.Linear(__lowerCamelCase , 6 * embedding_dim , bias=__lowerCamelCase )
        __UpperCAmelCase : int = nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase , eps=1e-6 )

    def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: int=None ) -> Any:
        __UpperCAmelCase : Optional[Any] = self.linear(self.silu(self.emb(__lowerCamelCase , __lowerCamelCase , hidden_dtype=__lowerCamelCase ) ) )
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = emb.chunk(6 , dim=1 )
        __UpperCAmelCase : Optional[int] = self.norm(__lowerCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class _snake_case ( nn.Module ):
    def __init__( self: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: float = 1e-5 ) -> int:
        super().__init__()
        __UpperCAmelCase : Tuple = num_groups
        __UpperCAmelCase : Dict = eps
        if act_fn is None:
            __UpperCAmelCase : List[str] = None
        else:
            __UpperCAmelCase : Optional[Any] = get_activation(__lowerCamelCase )
        __UpperCAmelCase : Tuple = nn.Linear(__lowerCamelCase , out_dim * 2 )

    def _lowerCamelCase ( self: Any , __lowerCamelCase: List[str] , __lowerCamelCase: Union[str, Any] ) -> List[Any]:
        if self.act:
            __UpperCAmelCase : Any = self.act(__lowerCamelCase )
        __UpperCAmelCase : Dict = self.linear(__lowerCamelCase )
        __UpperCAmelCase : Dict = emb[:, :, None, None]
        __UpperCAmelCase , __UpperCAmelCase : Optional[int] = emb.chunk(2 , dim=1 )
        __UpperCAmelCase : Union[str, Any] = F.group_norm(__lowerCamelCase , self.num_groups , eps=self.eps )
        __UpperCAmelCase : Optional[Any] = x * (1 + scale) + shift
        return x
342
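A note on the chunked feed-forward branch in the transformer block above: chunking splits the normalized hidden states along one dimension and runs the feed-forward network slice by slice, trading peak activation memory for extra kernel launches, which is why the code checks divisibility before calling chunk. A minimal standalone sketch in plain PyTorch (the ff module and all sizes below are illustrative, not the diffusers implementation):

import torch
import torch.nn as nn

# Chunked feed-forward: apply the MLP to slices of the sequence dimension
# so the intermediate (batch, chunk, 4 * dim) activation stays small.
ff = nn.Sequential(nn.Linear(64, 256), nn.GELU(), nn.Linear(256, 64))
hidden_states = torch.randn(2, 128, 64)  # (batch, seq_len, dim)

chunk_size, chunk_dim = 32, 1
if hidden_states.shape[chunk_dim] % chunk_size != 0:
    raise ValueError("sequence length must be divisible by chunk_size")
num_chunks = hidden_states.shape[chunk_dim] // chunk_size
chunked = torch.cat(
    [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
    dim=chunk_dim,
)

# The MLP acts position-wise, so chunking does not change the result.
assert torch.allclose(chunked, ff(hidden_states), atol=1e-5)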
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_snake_case = {
    '''configuration_trajectory_transformer''': [
        '''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TrajectoryTransformerConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = [
        '''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrajectoryTransformerModel''',
        '''TrajectoryTransformerPreTrainedModel''',
        '''load_tf_weights_in_trajectory_transformer''',
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
342
1
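The _LazyModule pattern in the package __init__ above defers the (potentially heavy) torch-dependent imports until an attribute is actually touched. A stripped-down sketch of that mechanism, assuming a package whose submodules really exist (all names here are hypothetical, not the transformers internals):

import importlib
import types

# Minimal lazy module: map each public attribute to the submodule that
# defines it, and import that submodule only on first attribute access.
class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
        setattr(self, attr, value)  # cache, so __getattr__ only runs once per name
        return value

The TYPE_CHECKING branch keeps eager imports visible to static type checkers while runtime callers go through the lazy path.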
from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=1e-1_2 ) -> str: __UpperCAmelCase : Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T __UpperCAmelCase : int = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T return jnp.matmul(snake_case__, norm_emb_a.T ) class _snake_case ( nn.Module ): lowerCamelCase__: CLIPConfig lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: Any ) -> Tuple: __UpperCAmelCase : List[str] = FlaxCLIPVisionModule(self.config.vision_config ) __UpperCAmelCase : Any = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype ) __UpperCAmelCase : int = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) ) __UpperCAmelCase : int = self.param( "special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) ) __UpperCAmelCase : Tuple = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) ) __UpperCAmelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) ) def __call__( self: List[Any] , __lowerCamelCase: Dict ) -> Dict: __UpperCAmelCase : Optional[int] = self.vision_model(__lowerCamelCase )[1] __UpperCAmelCase : List[str] = self.visual_projection(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds ) __UpperCAmelCase : Optional[Any] = jax_cosine_distance(__lowerCamelCase , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs __UpperCAmelCase : List[str] = 0.0 __UpperCAmelCase : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment __UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 ) __UpperCAmelCase : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase ) # Use a lower threshold if an image has any special care concept __UpperCAmelCase : List[Any] = is_special_care * 0.01 __UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment __UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 ) __UpperCAmelCase : Any = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class _snake_case ( _lowercase ): lowerCamelCase__: int = CLIPConfig lowerCamelCase__: Tuple = "clip_input" lowerCamelCase__: str = FlaxStableDiffusionSafetyCheckerModule def __init__( self: Union[str, Any] , __lowerCamelCase: CLIPConfig , __lowerCamelCase: Optional[Tuple] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: jnp.dtype = jnp.floataa , __lowerCamelCase: bool = True , **__lowerCamelCase: Optional[int] , ) -> int: if input_shape is None: __UpperCAmelCase : Dict = (1, 2_24, 2_24, 3) __UpperCAmelCase : Tuple = self.module_class(config=__lowerCamelCase , dtype=__lowerCamelCase , **__lowerCamelCase ) super().__init__(__lowerCamelCase , __lowerCamelCase , input_shape=__lowerCamelCase , seed=__lowerCamelCase , dtype=__lowerCamelCase , _do_init=_do_init ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: jax.random.KeyArray , __lowerCamelCase: Tuple , __lowerCamelCase: FrozenDict = None ) -> FrozenDict: # 
init input tensor __UpperCAmelCase : Tuple = jax.random.normal(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Dict = jax.random.split(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng} __UpperCAmelCase : str = self.module.init(__lowerCamelCase , __lowerCamelCase )["params"] return random_params def __call__( self: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: dict = None , ) -> List[Any]: __UpperCAmelCase : int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) ) return self.module.apply( {"params": params or self.params} , jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
342
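The jax_cosine_distance helper above is, despite the name, a cosine-similarity matrix: both embedding sets are L2-normalized row-wise (with the norm clipped at eps to avoid dividing by zero) and then multiplied. An equivalent NumPy sketch for intuition (shapes illustrative):

import numpy as np

def cosine_similarity_matrix(emb_1, emb_2, eps=1e-12):
    # Row-normalize both sets, clipping tiny norms, then take all pairwise
    # dot products; entries land in [-1, 1].
    a = emb_1 / np.clip(np.linalg.norm(emb_1, axis=1, keepdims=True), eps, None)
    b = emb_2 / np.clip(np.linalg.norm(emb_2, axis=1, keepdims=True), eps, None)
    return a @ b.T

scores = cosine_similarity_matrix(np.random.randn(4, 8), np.random.randn(3, 8))
assert scores.shape == (4, 3) and np.all(np.abs(scores) <= 1.0 + 1e-6)

The checker then offsets these similarities by learned per-concept thresholds and flags an image if any adjusted score is positive.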
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]: __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : Optional[int] = image_size __UpperCAmelCase : List[str] = num_channels __UpperCAmelCase : Union[str, Any] = num_stages __UpperCAmelCase : List[str] = hidden_sizes __UpperCAmelCase : Any = depths __UpperCAmelCase : Optional[int] = is_training __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[int] = intermediate_size __UpperCAmelCase : Optional[Any] = hidden_act __UpperCAmelCase : Union[str, Any] = num_labels __UpperCAmelCase : Any = initializer_range __UpperCAmelCase : List[str] = out_features __UpperCAmelCase : Tuple = out_indices __UpperCAmelCase : List[Any] = scope def _lowerCamelCase ( self: List[Any] ) -> Optional[int]: __UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : List[str] = None if self.use_labels: __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase : Optional[Any] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self: Tuple ) -> List[Any]: return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]: __UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : List[str] = model(__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], 
self.image_size // 32, self.image_size // 32) , ) def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple: __UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Any = model(__lowerCamelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _lowerCamelCase ( self: int ) -> List[str]: __UpperCAmelCase : int = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs __UpperCAmelCase : str = {"pixel_values": pixel_values} return config, inputs_dict def _lowerCamelCase ( self: List[Any] ) -> List[Any]: __UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class _snake_case ( _lowercase , _lowercase , unittest.TestCase ): lowerCamelCase__: Dict = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowerCamelCase__: str = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) lowerCamelCase__: Tuple = False lowerCamelCase__: int = False lowerCamelCase__: Dict = False lowerCamelCase__: int = False lowerCamelCase__: Any = False def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]: __UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self ) __UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def _lowerCamelCase ( self: Dict ) -> Tuple: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() 
self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self: List[Any] ) -> int: return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def _lowerCamelCase ( self: Any ) -> Any: pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def _lowerCamelCase ( self: str ) -> Optional[Any]: pass def _lowerCamelCase ( self: List[Any] ) -> int: if not self.model_tester.is_training: return for model_class in self.all_model_classes: __UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels() __UpperCAmelCase : Optional[Any] = True if model_class.__name__ in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ]: continue __UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() __UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) __UpperCAmelCase : Any = model(**__lowerCamelCase ).loss loss.backward() def _lowerCamelCase ( self: Optional[int] ) -> Dict: if not self.model_tester.is_training: return for model_class in self.all_model_classes: __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels() __UpperCAmelCase : List[str] = False __UpperCAmelCase : int = True if ( model_class.__name__ in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )] or not model_class.supports_gradient_checkpointing ): continue __UpperCAmelCase : int = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.gradient_checkpointing_enable() model.train() __UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) __UpperCAmelCase : Any = model(**__lowerCamelCase ).loss loss.backward() def _lowerCamelCase ( self: List[str] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : str = model_class(__lowerCamelCase ) __UpperCAmelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : List[Any] = [*signature.parameters.keys()] __UpperCAmelCase : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _lowerCamelCase ( self: str ) -> List[Any]: __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _lowerCamelCase ( self: Union[str, Any] ) -> Dict: def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ): __UpperCAmelCase : Any = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) __UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : Optional[int] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, 
self.model_tester.image_size // 4] , ) __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Optional[int] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Any = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def _lowerCamelCase ( self: Dict ) -> List[Any]: for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def _UpperCamelCase ( ) -> List[Any]: __UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self: Optional[int] ) -> Dict: return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def _lowerCamelCase ( self: List[Any] ) -> Tuple: __UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase ) __UpperCAmelCase : List[str] = self.default_image_processor __UpperCAmelCase : Optional[Any] = prepare_img() __UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): __UpperCAmelCase : str = model(**__lowerCamelCase ) # verify the logits __UpperCAmelCase : Dict = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) __UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
342
1
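The shape assertion in create_and_check_model above relies on ConvNeXt-style downsampling: a stride-4 stem followed by a stride-2 downsampling layer before each later stage, so with four stages the final feature map is image_size // 32. A quick arithmetic check of that claim, using the tester's default image_size:

# Stem stride 4, then stride 2 between each of the remaining stages.
num_stages = 4
total_stride = 4 * 2 ** (num_stages - 1)
assert total_stride == 32

image_size = 32  # the tester's default image_size
final_map = image_size // total_stride
assert final_map == 1  # matches the (batch, channels, 1, 1) backbone check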
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


_snake_case = logging.get_logger(__name__)


class _snake_case ( _lowercase ):
    def __init__( self: Optional[int] , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead." , __lowerCamelCase , )
        super().__init__(*__lowerCamelCase , **__lowerCamelCase )
342
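The file above is the standard deprecation shim: the old class name keeps working, but constructing it warns and everything else is inherited from the replacement. A generic sketch of the pattern (class names below are made up):

import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias that forwards everything to NewImageProcessor."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = OldFeatureExtractor(size=384)  # warns, then behaves like the new class
assert extractor.size == 384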
import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _snake_case = logging.get_logger(__name__) _snake_case = { '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''', # See all DETR models at https://huggingface.co/models?filter=detr } class _snake_case ( _lowercase ): lowerCamelCase__: str = "detr" lowerCamelCase__: Dict = ["past_key_values"] lowerCamelCase__: str = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) __UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): __UpperCAmelCase : List[Any] = backbone_config.get("model_type" ) __UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type] __UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase ) # set timm attributes to None __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None __UpperCAmelCase : Any = use_timm_backbone __UpperCAmelCase : Optional[Any] = backbone_config __UpperCAmelCase : Optional[Any] = num_channels __UpperCAmelCase : List[Any] = num_queries __UpperCAmelCase : Optional[int] = d_model __UpperCAmelCase : Optional[Any] = encoder_ffn_dim __UpperCAmelCase : Dict = encoder_layers __UpperCAmelCase : List[Any] = encoder_attention_heads __UpperCAmelCase : int = decoder_ffn_dim __UpperCAmelCase : Tuple = decoder_layers __UpperCAmelCase : int = decoder_attention_heads __UpperCAmelCase : List[Any] = dropout __UpperCAmelCase : Dict = attention_dropout __UpperCAmelCase : Optional[Any] = activation_dropout __UpperCAmelCase : int = activation_function __UpperCAmelCase : Any = init_std __UpperCAmelCase : str = init_xavier_std __UpperCAmelCase : int = encoder_layerdrop __UpperCAmelCase : Tuple = decoder_layerdrop __UpperCAmelCase : List[Any] = encoder_layers __UpperCAmelCase : Optional[Any] = auxiliary_loss __UpperCAmelCase : int = position_embedding_type __UpperCAmelCase : Optional[int] = backbone __UpperCAmelCase : str = use_pretrained_backbone __UpperCAmelCase : Dict = dilation # Hungarian matcher __UpperCAmelCase : Optional[int] = class_cost __UpperCAmelCase : Optional[Any] = bbox_cost __UpperCAmelCase : Optional[int] = giou_cost # Loss coefficients __UpperCAmelCase : Any = mask_loss_coefficient __UpperCAmelCase : Any = dice_loss_coefficient __UpperCAmelCase : Any = bbox_loss_coefficient __UpperCAmelCase : Optional[int] = giou_loss_coefficient __UpperCAmelCase : Optional[Any] = eos_coefficient super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase ) @property def _lowerCamelCase ( self: Dict ) -> int: return self.encoder_attention_heads @property def _lowerCamelCase ( self: str ) -> int: return self.d_model @classmethod def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]: return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self: str ) -> Dict[str, any]: __UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: __UpperCAmelCase : int = self.backbone_config.to_dict() __UpperCAmelCase : List[str] = self.__class__.model_type return output class _snake_case ( _lowercase ): lowerCamelCase__: Optional[int] = version.parse("1.11" ) @property def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _lowerCamelCase ( self: Optional[Any] ) -> float: return 1e-5 @property def _lowerCamelCase ( self: List[str] ) -> int: return 12
342
1
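The attribute_map on the DETR config above lets framework code ask for canonical names (hidden_size, num_attention_heads) while the config stores model-specific ones (d_model, encoder_attention_heads). A self-contained sketch of that aliasing, not the real PretrainedConfig machinery:

class TinyConfig:
    # canonical name -> model-specific attribute
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, d_model=256, encoder_attention_heads=8):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads

    def __getattr__(self, name):
        # Only called when normal lookup fails, so no recursion risk here.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

cfg = TinyConfig()
assert cfg.hidden_size == 256 and cfg.num_attention_heads == 8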
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class _snake_case ( _lowercase ): lowerCamelCase__: Any = (UnCLIPScheduler,) def _lowerCamelCase ( self: Union[str, Any] , **__lowerCamelCase: Dict ) -> int: __UpperCAmelCase : str = { "num_train_timesteps": 10_00, "variance_type": "fixed_small_log", "clip_sample": True, "clip_sample_range": 1.0, "prediction_type": "epsilon", } config.update(**__lowerCamelCase ) return config def _lowerCamelCase ( self: int ) -> int: for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def _lowerCamelCase ( self: Tuple ) -> List[str]: for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=__lowerCamelCase ) def _lowerCamelCase ( self: Optional[int] ) -> str: for clip_sample in [True, False]: self.check_over_configs(clip_sample=__lowerCamelCase ) def _lowerCamelCase ( self: Optional[int] ) -> int: for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=__lowerCamelCase ) def _lowerCamelCase ( self: Optional[int] ) -> Tuple: for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def _lowerCamelCase ( self: Optional[int] ) -> int: for time_step in [0, 5_00, 9_99]: for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=__lowerCamelCase , prev_timestep=__lowerCamelCase ) def _lowerCamelCase ( self: Dict ) -> int: __UpperCAmelCase : List[str] = self.scheduler_classes[0] __UpperCAmelCase : str = self.get_scheduler_config(variance_type="fixed_small_log" ) __UpperCAmelCase : Any = scheduler_class(**__lowerCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_54_96_25 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9_99_49_87 ) ) < 1e-5 def _lowerCamelCase ( self: Optional[Any] ) -> str: __UpperCAmelCase : int = self.scheduler_classes[0] __UpperCAmelCase : Optional[Any] = self.get_scheduler_config(variance_type="learned_range" ) __UpperCAmelCase : Tuple = scheduler_class(**__lowerCamelCase ) __UpperCAmelCase : List[Any] = 0.5 assert scheduler._get_variance(1 , predicted_variance=__lowerCamelCase ) - -10.1_71_27_90 < 1e-5 assert scheduler._get_variance(4_87 , predicted_variance=__lowerCamelCase ) - -5.7_99_80_52 < 1e-5 assert scheduler._get_variance(9_99 , predicted_variance=__lowerCamelCase ) - -0.0_01_00_11 < 1e-5 def _lowerCamelCase ( self: Dict ) -> List[str]: __UpperCAmelCase : Optional[int] = self.scheduler_classes[0] __UpperCAmelCase : List[Any] = self.get_scheduler_config() __UpperCAmelCase : Union[str, Any] = scheduler_class(**__lowerCamelCase ) __UpperCAmelCase : str = scheduler.timesteps __UpperCAmelCase : Any = self.dummy_model() __UpperCAmelCase : Dict = self.dummy_sample_deter __UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) for i, t in enumerate(__lowerCamelCase ): # 1. predict noise residual __UpperCAmelCase : str = model(__lowerCamelCase , __lowerCamelCase ) # 2. 
predict previous mean of sample x_t-1 __UpperCAmelCase : List[Any] = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample __UpperCAmelCase : List[str] = pred_prev_sample __UpperCAmelCase : List[str] = torch.sum(torch.abs(__lowerCamelCase ) ) __UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1e-2 assert abs(result_mean.item() - 0.3_28_47_43 ) < 1e-3 def _lowerCamelCase ( self: Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase : int = self.scheduler_classes[0] __UpperCAmelCase : Tuple = self.get_scheduler_config() __UpperCAmelCase : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(25 ) __UpperCAmelCase : Union[str, Any] = scheduler.timesteps __UpperCAmelCase : Union[str, Any] = self.dummy_model() __UpperCAmelCase : str = self.dummy_sample_deter __UpperCAmelCase : List[Any] = torch.manual_seed(0 ) for i, t in enumerate(__lowerCamelCase ): # 1. predict noise residual __UpperCAmelCase : List[str] = model(__lowerCamelCase , __lowerCamelCase ) if i + 1 == timesteps.shape[0]: __UpperCAmelCase : Union[str, Any] = None else: __UpperCAmelCase : List[str] = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 __UpperCAmelCase : List[str] = scheduler.step( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , prev_timestep=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample __UpperCAmelCase : Optional[int] = pred_prev_sample __UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) __UpperCAmelCase : Dict = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1e-2 assert abs(result_mean.item() - 0.3_36_20_38 ) < 1e-3 def _lowerCamelCase ( self: Optional[int] ) -> int: pass def _lowerCamelCase ( self: Optional[int] ) -> Union[str, Any]: pass
342
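The denoising-loop tests above pin scheduler behavior with aggregate statistics (the sum and mean of absolute values of the final sample) rather than full reference tensors, after seeding the generator. A schematic version of that test pattern; the model and step rule below are stand-ins, not UnCLIP:

import torch

torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8)
for t in reversed(range(10)):
    residual = 0.1 * sample            # stand-in for model(sample, t)
    sample = sample - 0.05 * residual  # stand-in for scheduler.step(...).prev_sample

result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
# A real test would then pin these against reference values, e.g.:
# assert abs(result_sum.item() - REFERENCE_SUM) < 1e-2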
from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=1e-1_2 ) -> str: __UpperCAmelCase : Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T __UpperCAmelCase : int = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T return jnp.matmul(snake_case__, norm_emb_a.T ) class _snake_case ( nn.Module ): lowerCamelCase__: CLIPConfig lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: Any ) -> Tuple: __UpperCAmelCase : List[str] = FlaxCLIPVisionModule(self.config.vision_config ) __UpperCAmelCase : Any = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype ) __UpperCAmelCase : int = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) ) __UpperCAmelCase : int = self.param( "special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) ) __UpperCAmelCase : Tuple = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) ) __UpperCAmelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) ) def __call__( self: List[Any] , __lowerCamelCase: Dict ) -> Dict: __UpperCAmelCase : Optional[int] = self.vision_model(__lowerCamelCase )[1] __UpperCAmelCase : List[str] = self.visual_projection(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds ) __UpperCAmelCase : Optional[Any] = jax_cosine_distance(__lowerCamelCase , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs __UpperCAmelCase : List[str] = 0.0 __UpperCAmelCase : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment __UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 ) __UpperCAmelCase : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase ) # Use a lower threshold if an image has any special care concept __UpperCAmelCase : List[Any] = is_special_care * 0.01 __UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment __UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 ) __UpperCAmelCase : Any = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class _snake_case ( _lowercase ): lowerCamelCase__: int = CLIPConfig lowerCamelCase__: Tuple = "clip_input" lowerCamelCase__: str = FlaxStableDiffusionSafetyCheckerModule def __init__( self: Union[str, Any] , __lowerCamelCase: CLIPConfig , __lowerCamelCase: Optional[Tuple] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: jnp.dtype = jnp.floataa , __lowerCamelCase: bool = True , **__lowerCamelCase: Optional[int] , ) -> int: if input_shape is None: __UpperCAmelCase : Dict = (1, 2_24, 2_24, 3) __UpperCAmelCase : Tuple = self.module_class(config=__lowerCamelCase , dtype=__lowerCamelCase , **__lowerCamelCase ) super().__init__(__lowerCamelCase , __lowerCamelCase , input_shape=__lowerCamelCase , seed=__lowerCamelCase , dtype=__lowerCamelCase , _do_init=_do_init ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: jax.random.KeyArray , __lowerCamelCase: Tuple , __lowerCamelCase: FrozenDict = None ) -> FrozenDict: # 
init input tensor __UpperCAmelCase : Tuple = jax.random.normal(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Dict = jax.random.split(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng} __UpperCAmelCase : str = self.module.init(__lowerCamelCase , __lowerCamelCase )["params"] return random_params def __call__( self: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: dict = None , ) -> List[Any]: __UpperCAmelCase : int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) ) return self.module.apply( {"params": params or self.params} , jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
342
1
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device _snake_case = False class _snake_case ( unittest.TestCase ): pass @nightly @require_torch_gpu class _snake_case ( unittest.TestCase ): def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self: str ) -> Union[str, Any]: __UpperCAmelCase : List[str] = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" ) # remove text_unet pipe.remove_unused_weights() pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) __UpperCAmelCase : Optional[int] = "A painting of a squirrel eating a burger " __UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) __UpperCAmelCase : Union[str, Any] = pipe( prompt=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__lowerCamelCase ) __UpperCAmelCase : Any = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCamelCase ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = generator.manual_seed(0 ) __UpperCAmelCase : Dict = pipe( prompt=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def _lowerCamelCase ( self: int ) -> List[str]: __UpperCAmelCase : List[str] = VersatileDiffusionTextToImagePipeline.from_pretrained( "shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) __UpperCAmelCase : Tuple = "A painting of a squirrel eating a burger " __UpperCAmelCase : str = torch.manual_seed(0 ) __UpperCAmelCase : str = pipe( prompt=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images __UpperCAmelCase : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) __UpperCAmelCase : Tuple = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
342
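The first test above is a save/reload determinism check: generate with a fixed seed, round-trip the pipeline through save_pretrained/from_pretrained, regenerate with the same seed, and require matching outputs. The same pattern with a plain module instead of a pipeline (a sketch, not the diffusers API):

import tempfile
import torch

torch.manual_seed(0)
model = torch.nn.Linear(4, 4)
x = torch.randn(2, 4)
with torch.no_grad():
    before = model(x)

with tempfile.TemporaryDirectory() as tmpdir:
    path = f"{tmpdir}/weights.pt"
    torch.save(model.state_dict(), path)
    reloaded = torch.nn.Linear(4, 4)
    reloaded.load_state_dict(torch.load(path))

with torch.no_grad():
    after = reloaded(x)
assert torch.allclose(before, after), "reload changed the forward pass"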
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def _UpperCamelCase ( snake_case__ ) -> Tuple: __UpperCAmelCase : Union[str, Any] = 384 if "tiny" in model_name: __UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3] __UpperCAmelCase : List[Any] = [96, 192, 384, 768] if "small" in model_name: __UpperCAmelCase : Tuple = [3, 3, 27, 3] __UpperCAmelCase : Any = [96, 192, 384, 768] if "base" in model_name: __UpperCAmelCase : str = [3, 3, 27, 3] __UpperCAmelCase : str = [128, 256, 512, 1024] __UpperCAmelCase : str = 512 if "large" in model_name: __UpperCAmelCase : Dict = [3, 3, 27, 3] __UpperCAmelCase : int = [192, 384, 768, 1536] __UpperCAmelCase : Dict = 768 if "xlarge" in model_name: __UpperCAmelCase : List[Any] = [3, 3, 27, 3] __UpperCAmelCase : Tuple = [256, 512, 1024, 2048] __UpperCAmelCase : int = 1024 # set label information __UpperCAmelCase : List[Any] = 150 __UpperCAmelCase : str = "huggingface/label-files" __UpperCAmelCase : List[Any] = "ade20k-id2label.json" __UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) ) __UpperCAmelCase : str = {int(snake_case__ ): v for k, v in idalabel.items()} __UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} __UpperCAmelCase : int = ConvNextConfig( depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] ) __UpperCAmelCase : int = UperNetConfig( backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, ) return config def _UpperCamelCase ( snake_case__ ) -> Tuple: __UpperCAmelCase : Optional[int] = [] # fmt: off # stem rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") ) rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") ) rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") ) rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', 
f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any: __UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ ) __UpperCAmelCase : Optional[int] = val def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]: __UpperCAmelCase : Dict = { "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth", "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth", "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth", "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth", "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth", } __UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name] __UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"] __UpperCAmelCase : Dict = get_upernet_config(snake_case__ ) __UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): __UpperCAmelCase : str = state_dict.pop(snake_case__ ) if "bn" in key: __UpperCAmelCase : int = key.replace("bn", "batch_norm" ) __UpperCAmelCase : Union[str, Any] = val # rename keys __UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__, snake_case__, snake_case__ ) model.load_state_dict(snake_case__ ) # verify on image __UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" __UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" ) 
__UpperCAmelCase : str = SegformerImageProcessor() __UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values with torch.no_grad(): __UpperCAmelCase : Union[str, Any] = model(snake_case__ ) if model_name == "upernet-convnext-tiny": __UpperCAmelCase : Any = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": __UpperCAmelCase : Optional[Any] = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": __UpperCAmelCase : Dict = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": __UpperCAmelCase : Tuple = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": __UpperCAmelCase : Union[str, Any] = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print("Logits:", outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case__ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(snake_case__ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-convnext-tiny''', type=str, choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']], help='''Name of the ConvNext UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _snake_case = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
342
1
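The conversion script above is driven entirely by a (source, destination) rename table plus a pop-and-reinsert helper; everything else is bookkeeping around building that table from the config. The core mechanism in isolation (keys below are illustrative):

import torch

def rename_key(state_dict, src, dest):
    # Pop under the old name, reinsert the same tensor under the new one.
    state_dict[dest] = state_dict.pop(src)

state_dict = {
    "backbone.norm0.weight": torch.ones(3),
    "decode_head.conv_seg.bias": torch.zeros(2),
}
rename_table = [
    ("backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight"),
    ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
]
for src, dest in rename_table:
    rename_key(state_dict, src, dest)

assert "backbone.norm0.weight" not in state_dict
assert state_dict["decode_head.classifier.bias"].shape == (2,)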
import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings _snake_case = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class _snake_case ( _lowercase ): lowerCamelCase__: bool = field(default=_lowercase , metadata={"help": "Whether to use SortishSampler or not."} ) lowerCamelCase__: bool = field( default=_lowercase , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowerCamelCase__: Optional[int] = field( default=_lowercase , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowerCamelCase__: Optional[int] = field( default=_lowercase , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowerCamelCase__: Optional[Union[str, Path, GenerationConfig]] = field( default=_lowercase , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def _lowerCamelCase ( self: int ) -> Dict: __UpperCAmelCase : str = super().to_dict() for k, v in d.items(): if isinstance(__lowerCamelCase , __lowerCamelCase ): __UpperCAmelCase : Tuple = v.to_dict() return d
342
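The training-arguments class above uses the dataclass-with-metadata idiom: each field carries a help string so an argument parser can be generated from the class itself, and the to_dict override recursively serializes nested config objects. The field idiom in miniature (field names below are invented):

from dataclasses import dataclass, field, fields
from typing import Optional

@dataclass
class TinyArgs:
    sortish_sampler: bool = field(
        default=False, metadata={"help": "Whether to use SortishSampler or not."}
    )
    generation_max_length: Optional[int] = field(
        default=None, metadata={"help": "Max length for each evaluation loop with generate."}
    )

# The metadata is what a parser generator reads to build --help text.
helps = {f.name: f.metadata["help"] for f in fields(TinyArgs)}
assert "SortishSampler" in helps["sortish_sampler"]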
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''', } class _snake_case ( _lowercase ): lowerCamelCase__: Dict = "roc_bert" def __init__( self: int , __lowerCamelCase: Union[str, Any]=3_05_22 , __lowerCamelCase: int=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: int=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Any=5_12 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: str=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]="absolute" , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=9_10 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=2_48_58 , __lowerCamelCase: Optional[int]=True , **__lowerCamelCase: Any , ) -> List[Any]: __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Dict = max_position_embeddings __UpperCAmelCase : Optional[Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[str] = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : Optional[int] = attention_probs_dropout_prob __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : Optional[Any] = type_vocab_size __UpperCAmelCase : List[Any] = layer_norm_eps __UpperCAmelCase : Optional[int] = use_cache __UpperCAmelCase : Optional[Any] = enable_pronunciation __UpperCAmelCase : Any = enable_shape __UpperCAmelCase : Union[str, Any] = pronunciation_embed_dim __UpperCAmelCase : Optional[Any] = pronunciation_vocab_size __UpperCAmelCase : Optional[Any] = shape_embed_dim __UpperCAmelCase : List[Any] = shape_vocab_size __UpperCAmelCase : int = concat_input __UpperCAmelCase : int = position_embedding_type __UpperCAmelCase : Optional[int] = classifier_dropout super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
342
1
import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) -> Any: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file __UpperCAmelCase : List[Any] = TapasConfig.from_json_file(snake_case__ ) # set absolute/relative position embeddings parameter __UpperCAmelCase : Optional[int] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": __UpperCAmelCase : str = TapasForQuestionAnswering(config=snake_case__ ) elif task == "WTQ": # run_task_main.py hparams __UpperCAmelCase : Union[str, Any] = 4 __UpperCAmelCase : Any = True # hparam_utils.py hparams __UpperCAmelCase : int = 0.66_4694 __UpperCAmelCase : List[str] = 0.20_7951 __UpperCAmelCase : Tuple = 0.12_1194 __UpperCAmelCase : Tuple = True __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : str = False __UpperCAmelCase : int = 0.035_2513 __UpperCAmelCase : Any = TapasForQuestionAnswering(config=snake_case__ ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams __UpperCAmelCase : List[Any] = 4 __UpperCAmelCase : Union[str, Any] = False # hparam_utils.py hparams __UpperCAmelCase : Tuple = 36.4519 __UpperCAmelCase : List[str] = 0.90_3421 __UpperCAmelCase : Dict = 222.088 __UpperCAmelCase : Dict = True __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : Tuple = True __UpperCAmelCase : List[Any] = 0.76_3141 __UpperCAmelCase : Optional[int] = TapasForQuestionAnswering(config=snake_case__ ) elif task == "TABFACT": __UpperCAmelCase : Optional[int] = TapasForSequenceClassification(config=snake_case__ ) elif task == "MLM": __UpperCAmelCase : Tuple = TapasForMaskedLM(config=snake_case__ ) elif task == "INTERMEDIATE_PRETRAINING": __UpperCAmelCase : List[str] = TapasModel(config=snake_case__ ) else: raise ValueError(f'''Task {task} not supported.''' ) print(f'''Building PyTorch model from configuration: {config}''' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(snake_case__, snake_case__, snake_case__ ) # Save pytorch-model (weights and configuration) print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(snake_case__ ) # Save tokenizer files print(f'''Save tokenizer files to {pytorch_dump_path}''' ) __UpperCAmelCase : Any = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512 ) tokenizer.save_pretrained(snake_case__ ) print("Used relative position embeddings:", model.config.reset_position_index_per_cell ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. 
Defaults to True.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _snake_case = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def _UpperCamelCase ( snake_case__ ) -> int: __UpperCAmelCase : int = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: __UpperCAmelCase : int = [144, 192, 240] __UpperCAmelCase : Optional[Any] = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: __UpperCAmelCase : Optional[Any] = [96, 120, 144] __UpperCAmelCase : Tuple = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: __UpperCAmelCase : str = [64, 80, 96] __UpperCAmelCase : Optional[Any] = [16, 16, 24, 48, 64, 80, 320] __UpperCAmelCase : Tuple = 0.05 __UpperCAmelCase : Dict = 2.0 if mobilevit_name.startswith("deeplabv3_" ): __UpperCAmelCase : str = 512 __UpperCAmelCase : Any = 16 __UpperCAmelCase : str = 21 __UpperCAmelCase : Union[str, Any] = "pascal-voc-id2label.json" else: __UpperCAmelCase : Optional[Any] = 1000 __UpperCAmelCase : int = "imagenet-1k-id2label.json" __UpperCAmelCase : Dict = "huggingface/label-files" __UpperCAmelCase : int = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) ) __UpperCAmelCase : Any = {int(snake_case__ ): v for k, v in idalabel.items()} __UpperCAmelCase : int = idalabel __UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()} return config def _UpperCamelCase ( snake_case__, snake_case__=False ) -> Tuple: for i in range(1, 6 ): if f'''layer_{i}.''' in name: __UpperCAmelCase : Tuple = name.replace(f'''layer_{i}.''', f'''encoder.layer.{i - 1}.''' ) if "conv_1." in name: __UpperCAmelCase : Dict = name.replace("conv_1.", "conv_stem." ) if ".block." in name: __UpperCAmelCase : Optional[int] = name.replace(".block.", "." ) if "exp_1x1" in name: __UpperCAmelCase : Tuple = name.replace("exp_1x1", "expand_1x1" ) if "red_1x1" in name: __UpperCAmelCase : Optional[Any] = name.replace("red_1x1", "reduce_1x1" ) if ".local_rep.conv_3x3." in name: __UpperCAmelCase : Optional[int] = name.replace(".local_rep.conv_3x3.", ".conv_kxk." ) if ".local_rep.conv_1x1." in name: __UpperCAmelCase : Any = name.replace(".local_rep.conv_1x1.", ".conv_1x1." ) if ".norm." in name: __UpperCAmelCase : Dict = name.replace(".norm.", ".normalization." ) if ".conv." in name: __UpperCAmelCase : List[Any] = name.replace(".conv.", ".convolution." ) if ".conv_proj." in name: __UpperCAmelCase : List[str] = name.replace(".conv_proj.", ".conv_projection." 
) for i in range(0, 2 ): for j in range(0, 4 ): if f'''.{i}.{j}.''' in name: __UpperCAmelCase : List[Any] = name.replace(f'''.{i}.{j}.''', f'''.{i}.layer.{j}.''' ) for i in range(2, 6 ): for j in range(0, 4 ): if f'''.{i}.{j}.''' in name: __UpperCAmelCase : Any = name.replace(f'''.{i}.{j}.''', f'''.{i}.''' ) if "expand_1x1" in name: __UpperCAmelCase : Optional[int] = name.replace("expand_1x1", "downsampling_layer.expand_1x1" ) if "conv_3x3" in name: __UpperCAmelCase : List[Any] = name.replace("conv_3x3", "downsampling_layer.conv_3x3" ) if "reduce_1x1" in name: __UpperCAmelCase : Dict = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1" ) for i in range(2, 5 ): if f'''.global_rep.{i}.weight''' in name: __UpperCAmelCase : Any = name.replace(f'''.global_rep.{i}.weight''', ".layernorm.weight" ) if f'''.global_rep.{i}.bias''' in name: __UpperCAmelCase : Optional[Any] = name.replace(f'''.global_rep.{i}.bias''', ".layernorm.bias" ) if ".global_rep." in name: __UpperCAmelCase : Tuple = name.replace(".global_rep.", ".transformer." ) if ".pre_norm_mha.0." in name: __UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_mha.0.", ".layernorm_before." ) if ".pre_norm_mha.1.out_proj." in name: __UpperCAmelCase : Tuple = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense." ) if ".pre_norm_ffn.0." in name: __UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_ffn.0.", ".layernorm_after." ) if ".pre_norm_ffn.1." in name: __UpperCAmelCase : Dict = name.replace(".pre_norm_ffn.1.", ".intermediate.dense." ) if ".pre_norm_ffn.4." in name: __UpperCAmelCase : int = name.replace(".pre_norm_ffn.4.", ".output.dense." ) if ".transformer." in name: __UpperCAmelCase : Tuple = name.replace(".transformer.", ".transformer.layer." ) if ".aspp_layer." in name: __UpperCAmelCase : Any = name.replace(".aspp_layer.", "." ) if ".aspp_pool." in name: __UpperCAmelCase : Optional[Any] = name.replace(".aspp_pool.", "." ) if "seg_head." in name: __UpperCAmelCase : Optional[int] = name.replace("seg_head.", "segmentation_head." ) if "segmentation_head.classifier.classifier." in name: __UpperCAmelCase : str = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier." ) if "classifier.fc." in name: __UpperCAmelCase : Optional[Any] = name.replace("classifier.fc.", "classifier." ) elif (not base_model) and ("segmentation_head." not in name): __UpperCAmelCase : List[str] = "mobilevit." + name return name def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=False ) -> Union[str, Any]: if base_model: __UpperCAmelCase : Optional[int] = "" else: __UpperCAmelCase : Tuple = "mobilevit." for key in orig_state_dict.copy().keys(): __UpperCAmelCase : Optional[int] = orig_state_dict.pop(snake_case__ ) if key[:8] == "encoder.": __UpperCAmelCase : str = key[8:] if "qkv" in key: __UpperCAmelCase : Tuple = key.split("." 
) __UpperCAmelCase : List[Any] = int(key_split[0][6:] ) - 1 __UpperCAmelCase : Optional[Any] = int(key_split[3] ) __UpperCAmelCase : Tuple = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' ) __UpperCAmelCase : List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size __UpperCAmelCase : Optional[Any] = ( f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.''' ) if "weight" in key: __UpperCAmelCase : Any = val[:dim, :] __UpperCAmelCase : Any = val[dim : dim * 2, :] __UpperCAmelCase : List[Any] = val[-dim:, :] else: __UpperCAmelCase : List[str] = val[:dim] __UpperCAmelCase : Optional[Any] = val[dim : dim * 2] __UpperCAmelCase : List[Any] = val[-dim:] else: __UpperCAmelCase : str = val return orig_state_dict def _UpperCamelCase ( ) -> Any: __UpperCAmelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" __UpperCAmelCase : List[str] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ) return im @torch.no_grad() def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=False ) -> Optional[Any]: __UpperCAmelCase : Tuple = get_mobilevit_config(snake_case__ ) # load original state_dict __UpperCAmelCase : str = torch.load(snake_case__, map_location="cpu" ) # load 🤗 model if mobilevit_name.startswith("deeplabv3_" ): __UpperCAmelCase : Optional[int] = MobileViTForSemanticSegmentation(snake_case__ ).eval() else: __UpperCAmelCase : List[Any] = MobileViTForImageClassification(snake_case__ ).eval() __UpperCAmelCase : Dict = convert_state_dict(snake_case__, snake_case__ ) model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor __UpperCAmelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 ) __UpperCAmelCase : Any = image_processor(images=prepare_img(), return_tensors="pt" ) __UpperCAmelCase : Dict = model(**snake_case__ ) __UpperCAmelCase : Tuple = outputs.logits if mobilevit_name.startswith("deeplabv3_" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": __UpperCAmelCase : int = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": __UpperCAmelCase : Tuple = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": __UpperCAmelCase : Any = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ] ) else: raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3, :3, :3], snake_case__, atol=1e-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": __UpperCAmelCase : str = torch.tensor([-0.9866, 0.2392, -1.1241] ) elif mobilevit_name == "mobilevit_xs": __UpperCAmelCase : Tuple = torch.tensor([-2.4761, 
-0.9399, -1.9587] ) elif mobilevit_name == "mobilevit_xxs": __UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ) else: raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3], snake_case__, atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case__ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case__ ) if push_to_hub: __UpperCAmelCase : List[str] = { "mobilevit_s": "mobilevit-small", "mobilevit_xs": "mobilevit-x-small", "mobilevit_xxs": "mobilevit-xx-small", "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small", "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small", "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small", } print("Pushing to the hub..." ) __UpperCAmelCase : int = model_mapping[mobilevit_name] image_processor.push_to_hub(snake_case__, organization="apple" ) model.push_to_hub(snake_case__, organization="apple" ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--mobilevit_name''', default='''mobilevit_s''', type=str, help=( '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',''' ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.''' ), ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) args = parser.parse_args() _UpperCamelCase( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
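# Inference sketch for a converted classifier; "apple/mobilevit-small" is the hub name
# the mapping above produces for "mobilevit_s" (treat the checkpoint as an assumption).
from PIL import Image
import requests
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
classifier = MobileViTForImageClassification.from_pretrained("apple/mobilevit-small")
logits = classifier(**processor(images=image, return_tensors="pt")).logits
print(classifier.config.id2label[int(logits.argmax(-1))])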
import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class _snake_case ( _lowercase ): lowerCamelCase__: Any = ["image_processor", "tokenizer"] lowerCamelCase__: Optional[Any] = "BlipImageProcessor" lowerCamelCase__: Optional[int] = "AutoTokenizer" def __init__( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict: super().__init__(__lowerCamelCase , __lowerCamelCase ) # add QFormer tokenizer __UpperCAmelCase : Dict = qformer_tokenizer def __call__( self: Any , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Dict , ) -> BatchFeature: if images is None and text is None: raise ValueError("You have to specify at least images or text." ) __UpperCAmelCase : str = BatchFeature() if text is not None: __UpperCAmelCase : Any = self.tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) encoding.update(__lowerCamelCase ) __UpperCAmelCase : Dict = self.qformer_tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) __UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" ) __UpperCAmelCase : Optional[int] = qformer_text_encoding.pop("attention_mask" ) if images is not None: __UpperCAmelCase : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase ) encoding.update(__lowerCamelCase ) return encoding def _lowerCamelCase ( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Optional[Any]: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Any , 
**__lowerCamelCase: Dict ) -> Tuple: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def _lowerCamelCase ( self: List[str] ) -> Tuple: __UpperCAmelCase : str = self.tokenizer.model_input_names __UpperCAmelCase : Dict = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> str: if os.path.isfile(__lowerCamelCase ): raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) __UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , "qformer_tokenizer" ) self.qformer_tokenizer.save_pretrained(__lowerCamelCase ) return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase ) @classmethod def _lowerCamelCase ( cls: Tuple , __lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]: __UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" ) __UpperCAmelCase : List[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase ) args.append(__lowerCamelCase ) return cls(*__lowerCamelCase )
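# Usage sketch for this processor (exposed in transformers as InstructBlipProcessor);
# the checkpoint name below is an assumption, not taken from this file.
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
inputs = processor(images=Image.new("RGB", (224, 224)), text="Describe the image.", return_tensors="pt")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, qformer_attention_mask, qformer_input_ids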
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among `num_picked` balls drawn from
    NUM_BALLS balls (NUM_COLOURS colours, BALLS_PER_COLOUR balls of each)."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
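# Sanity check of the closed form above: by linearity of expectation, each of the
# NUM_COLOURS colours is missing from the 20 drawn balls with probability
# C(60, 20) / C(70, 20), so the expected count is 7 * (1 - that probability).
from fractions import Fraction

p_missing = Fraction(math.comb(60, 20), math.comb(70, 20))
assert f"{float(7 * (1 - p_missing)):.9f}" == solution(20)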
import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class _snake_case ( unittest.TestCase ): def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str: __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : Optional[int] = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : List[str] = use_attention_mask __UpperCAmelCase : Dict = use_token_type_ids __UpperCAmelCase : Optional[int] = use_labels __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Dict = num_hidden_layers __UpperCAmelCase : Dict = num_attention_heads __UpperCAmelCase : Tuple = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_act __UpperCAmelCase : Tuple = hidden_dropout_prob __UpperCAmelCase : str = attention_probs_dropout_prob __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : Optional[int] = type_vocab_size __UpperCAmelCase : str = type_sequence_label_size __UpperCAmelCase : Tuple = initializer_range __UpperCAmelCase : str = num_choices def _lowerCamelCase ( self: Optional[Any] ) -> List[str]: __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : str = None if self.use_attention_mask: __UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Any = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , ) return config, input_ids, attention_mask def _lowerCamelCase ( self: str ) -> Any: __UpperCAmelCase : List[str] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs __UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class _snake_case ( _lowercase , unittest.TestCase ): lowerCamelCase__: str = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, 
FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, ) if is_flax_available() else () ) def _lowerCamelCase ( self: List[Any] ) -> Dict: __UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self ) @slow def _lowerCamelCase ( self: Tuple ) -> Optional[Any]: for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" ) __UpperCAmelCase : Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCamelCase ) @require_flax class _snake_case ( unittest.TestCase ): @slow def _lowerCamelCase ( self: int ) -> List[Any]: __UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" ) __UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) __UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0] __UpperCAmelCase : str = (1, 11, 7_68) self.assertEqual(output.shape , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
def longest_distance(graph):
    """Print the number of vertices on the longest path in a DAG, using Kahn's
    topological sort with indegree counting."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)  # prints 5 (e.g. the path 0 -> 2 -> 5 -> 6 -> 7)
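# Quick extra check: in a simple chain 0 -> 1 -> 2 the longest path touches 3 vertices.
longest_distance({0: [1], 1: [2], 2: []})  # prints 3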
import numpy as np import torch from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 _snake_case = 0b101_100_111_110_110_010_010_000_011_110_111_011_000_110_011_110 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 _snake_case = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class _snake_case : def __init__( self: List[str] ) -> List[str]: __UpperCAmelCase : List[str] = WATERMARK_BITS __UpperCAmelCase : Optional[Any] = WatermarkEncoder() self.encoder.set_watermark("bits" , self.watermark ) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: torch.FloatTensor ) -> Optional[Any]: # can't encode images that are smaller than 256 if images.shape[-1] < 2_56: return images __UpperCAmelCase : Dict = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __UpperCAmelCase : Dict = [self.encoder.encode(__lowerCamelCase , "dwtDct" ) for image in images] __UpperCAmelCase : Dict = torch.from_numpy(np.array(__lowerCamelCase ) ).permute(0 , 3 , 1 , 2 ) __UpperCAmelCase : Dict = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0 ) return images
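# Standalone sketch of the underlying imwatermark API used above; `bgr` is an assumed
# uint8 HxWx3 image at least 256 pixels on each side (smaller images are returned
# unmodified by the class above).
bgr = np.zeros((256, 256, 3), dtype=np.uint8)
demo_encoder = WatermarkEncoder()
demo_encoder.set_watermark("bits", [0, 1] * 24)  # arbitrary 48-bit demo pattern
watermarked = demo_encoder.encode(bgr, "dwtDct")
print(watermarked.shape)  # (256, 256, 3)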
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _snake_case = { '''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''], '''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''], '''processing_whisper''': ['''WhisperProcessor'''], '''tokenization_whisper''': ['''WhisperTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['''WhisperTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''WhisperForConditionalGeneration''', '''WhisperModel''', '''WhisperPreTrainedModel''', '''WhisperForAudioClassification''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWhisperForConditionalGeneration''', '''TFWhisperModel''', '''TFWhisperPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''FlaxWhisperForConditionalGeneration''', '''FlaxWhisperModel''', '''FlaxWhisperPreTrainedModel''', '''FlaxWhisperForAudioClassification''', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
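# Effect of the _LazyModule pattern above: a name is resolved only on first attribute
# access, so config-only imports avoid loading the torch/tf/flax modeling code.
# (Shown as comments because running it inside this __init__ would be circular.)
# from transformers import WhisperConfig  # triggers only configuration_whisper
# print(WhisperConfig().model_type)       # "whisper"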
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _snake_case = logging.get_logger(__name__) _snake_case = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } _snake_case = { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } _snake_case = {'''facebook/blenderbot-3B''': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _UpperCamelCase ( ) -> Dict: __UpperCAmelCase : Tuple = ( list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) ) ) __UpperCAmelCase : str = bs[:] __UpperCAmelCase : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case__ ) cs.append(2**8 + n ) n += 1 __UpperCAmelCase : Optional[Any] = [chr(snake_case__ ) for n in cs] return dict(zip(snake_case__, snake_case__ ) ) def _UpperCamelCase ( snake_case__ ) -> Any: __UpperCAmelCase : List[Any] = set() __UpperCAmelCase : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase : Union[str, Any] = char return pairs class _snake_case ( _lowercase ): lowerCamelCase__: str = VOCAB_FILES_NAMES lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__: Dict = ["input_ids", "attention_mask"] def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]: __UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token __UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token __UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token __UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token __UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token __UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else 
pad_token # Mask token behave like a normal word, i.e. include the space before it __UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: __UpperCAmelCase : List[Any] = json.load(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()} __UpperCAmelCase : Dict = errors # how to handle errors in decoding __UpperCAmelCase : Optional[int] = bytes_to_unicode() __UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: __UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1] __UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges] __UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Tuple = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _lowerCamelCase ( self: Dict ) -> Any: return len(self.encoder ) def _lowerCamelCase ( self: Optional[Any] ) -> List[str]: return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCamelCase ( self: int , __lowerCamelCase: List[Any] ) -> Union[str, Any]: if token in self.cache: return self.cache[token] __UpperCAmelCase : List[Any] = tuple(__lowerCamelCase ) __UpperCAmelCase : Dict = get_pairs(__lowerCamelCase ) if not pairs: return token while True: __UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : str = 0 while i < len(__lowerCamelCase ): try: __UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase : Union[str, Any] = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase : List[Any] = tuple(__lowerCamelCase ) __UpperCAmelCase : str = new_word if len(__lowerCamelCase ) == 1: break else: __UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = word return word def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict: __UpperCAmelCase : Any = [] for token in re.findall(self.pat , __lowerCamelCase ): __UpperCAmelCase : int = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our 
case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict: return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]: return self.decoder.get(__lowerCamelCase ) def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int: __UpperCAmelCase : Dict = "".join(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __UpperCAmelCase : Dict = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) __UpperCAmelCase : Optional[Any] = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) __UpperCAmelCase : Optional[Any] = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]: __UpperCAmelCase : int = [self.sep_token_id] __UpperCAmelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]: __UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): __UpperCAmelCase : Optional[Any] = " " + text return (text, kwargs) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]: return token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]: __UpperCAmelCase : Tuple = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix 
as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: __UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
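# A minimal sketch of the special-token logic above: this tokenizer only appends the
# EOS token (no BOS wrapping), so every encoded sequence ends with eos_token_id.
# It assumes the public class name BlenderbotTokenizer and the checkpoint from the map above.
from transformers import BlenderbotTokenizer

demo_tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
demo_ids = demo_tok(" Hello world").input_ids
assert demo_ids[-1] == demo_tok.eos_token_id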
from __future__ import annotations

from math import pi


def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Given any two of inductance (H), frequency (Hz) and inductive reactance (ohm),
    with the unknown passed as 0, solve X_L = 2 * pi * f * L for the missing one."""
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
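# Worked example: a 0.1 H inductor driven at 50 Hz has reactance
# X_L = 2 * pi * f * L = 2 * pi * 50 * 0.1, about 31.416 ohm.
print(ind_reactance(inductance=0.1, frequency=50, reactance=0))
# -> {'reactance': 31.41592653589793}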
import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _snake_case = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''') @require_sentencepiece @require_tokenizers class _snake_case ( _lowercase , unittest.TestCase ): lowerCamelCase__: int = GPTSwaTokenizer lowerCamelCase__: int = False lowerCamelCase__: str = True lowerCamelCase__: Optional[int] = False def _lowerCamelCase ( self: Union[str, Any] ) -> List[str]: super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase : int = GPTSwaTokenizer(__lowerCamelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def _lowerCamelCase ( self: Any , __lowerCamelCase: List[Any] ) -> Tuple: __UpperCAmelCase : List[Any] = "This is a test" __UpperCAmelCase : Any = "This is a test" return input_text, output_text def _lowerCamelCase ( self: Dict ) -> Union[str, Any]: __UpperCAmelCase : Optional[Any] = "<s>" __UpperCAmelCase : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def _lowerCamelCase ( self: List[Any] ) -> Optional[int]: __UpperCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(__lowerCamelCase ) , 20_00 ) def _lowerCamelCase ( self: List[str] ) -> str: self.assertEqual(self.get_tokenizer().vocab_size , 20_00 ) def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]: __UpperCAmelCase : Optional[Any] = GPTSwaTokenizer(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [4_65, 2_87, 2_65, 6_31, 8_42] ) __UpperCAmelCase : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) # fmt: off self.assertListEqual( __lowerCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , ) # fmt: on __UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , ) __UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) # fmt: off self.assertListEqual( __lowerCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ) # fmt: on def _lowerCamelCase ( self: List[str] ) -> Union[str, Any]: __UpperCAmelCase : List[str] = GPTSwaTokenizer(__lowerCamelCase ) __UpperCAmelCase : str = ["This is a test", "I was born in 92000, and this is falsé."] __UpperCAmelCase : Optional[int] = [ [4_65, 2_87, 2_65, 6_31, 8_42], [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(__lowerCamelCase , __lowerCamelCase ): self.assertListEqual(tokenizer.encode_fast(__lowerCamelCase ) , __lowerCamelCase ) # Test that decode_fast returns the input text for text, token_ids in zip(__lowerCamelCase , __lowerCamelCase ): self.assertEqual(tokenizer.decode_fast(__lowerCamelCase ) , __lowerCamelCase ) @slow def _lowerCamelCase ( self: Optional[int] ) -> Dict: __UpperCAmelCase : str = [ "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. 
Cool", ] # fmt: off __UpperCAmelCase : List[Any] = {"input_ids": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=__lowerCamelCase , )
import flax.linen as nn import jax import jax.numpy as jnp class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]: __UpperCAmelCase : List[str] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape __UpperCAmelCase : Dict = jax.image.resize( __lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , ) __UpperCAmelCase : Dict = self.conv(__lowerCamelCase ) return hidden_states class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: str ) -> Any: __UpperCAmelCase : Optional[int] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) __UpperCAmelCase : Any = self.conv(__lowerCamelCase ) return hidden_states class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: int = None lowerCamelCase__: float = 0.0 lowerCamelCase__: bool = None lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: str ) -> List[str]: __UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels __UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) __UpperCAmelCase : List[str] = nn.Conv( __lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype ) __UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) __UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob ) __UpperCAmelCase : Tuple = nn.Conv( __lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __UpperCAmelCase : List[Any] = None if use_nin_shortcut: __UpperCAmelCase : Dict = nn.Conv( __lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , ) def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]: __UpperCAmelCase : Dict = hidden_states __UpperCAmelCase : int = self.norma(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase ) __UpperCAmelCase : Tuple = self.conva(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) ) __UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 ) __UpperCAmelCase : List[str] = hidden_states + temb __UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase ) __UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase ) __UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : List[str] = self.conva(__lowerCamelCase ) if self.conv_shortcut is not None: __UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase ) return hidden_states + residual
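# The upsample block above hinges on jax.image.resize with method="nearest"; a
# standalone sketch of the NHWC shape change it performs:
x = jnp.ones((1, 16, 16, 8))  # (batch, height, width, channels)
y = jax.image.resize(x, shape=(1, 32, 32, 8), method="nearest")
print(y.shape)  # (1, 32, 32, 8)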
# This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests _snake_case = open # noqa: we just need to have a builtin inside this module to test it properly
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss _snake_case = pytest.mark.integration @require_faiss class _snake_case ( _lowercase ): def _lowerCamelCase ( self: Union[str, Any] ) -> str: __UpperCAmelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(__lowerCamelCase ) for x in np.arange(30 ).tolist()]} ) return dset def _lowerCamelCase ( self: Optional[Any] ) -> Tuple: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() __UpperCAmelCase : int = dset.map( lambda __lowerCamelCase , __lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase ) __UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT ) __UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) dset.drop_index("vecs" ) def _lowerCamelCase ( self: List[str] ) -> int: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def _lowerCamelCase ( self: Optional[int] ) -> Dict: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
            dset.save_faiss_index("vecs" , tmp_file.name )
            dset.load_faiss_index("vecs2" , tmp_file.name )
        os.unlink(tmp_file.name )
        __UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )

    def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
        __UpperCAmelCase : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
        dset.drop_index("vecs" )
        self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )

    def _lowerCamelCase ( self: List[str] ) -> Dict:
        from elasticsearch import Elasticsearch

        __UpperCAmelCase : Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            __UpperCAmelCase : int = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
            __UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            __UpperCAmelCase : Any = Elasticsearch()
            dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase )
            __UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" )
            self.assertEqual(examples["filename"][0] , "my_name-train_29" )


@require_faiss
class _snake_case ( _lowercase ):
    def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
        import faiss

        __UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        __UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa )
        __UpperCAmelCase : List[str] = 1
        __UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase )
        self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        __UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
        __UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase )
        self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] )
        __UpperCAmelCase : Dict = [scores[0] for scores in total_scores]
        __UpperCAmelCase : int = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(__lowerCamelCase ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase )

    def _lowerCamelCase ( self: Any ) -> List[str]:
        import faiss

        __UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        __UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(__lowerCamelCase ):
            __UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )

    def _lowerCamelCase ( self: List[str] ) -> Dict:
        import faiss

        __UpperCAmelCase : str = faiss.IndexFlat(5 )
        __UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )

    def _lowerCamelCase ( self: Union[str, Any] ) -> int:
        import faiss

        __UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
            index.save(tmp_file.name )
            __UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )

        __UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa )
        __UpperCAmelCase : Tuple = 1
        __UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )


@require_faiss
def _UpperCamelCase ( snake_case__ ) -> Optional[Any]:
    import faiss

    __UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5, dtype=np.floataa ) )

    __UpperCAmelCase : Optional[Any] = "index.faiss"
    __UpperCAmelCase : Optional[int] = f'''mock://{index_name}'''
    index.save(snake_case__, storage_options=mockfs.storage_options )
    __UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options )

    __UpperCAmelCase : str = np.zeros(5, dtype=np.floataa )
    __UpperCAmelCase : Any = 1
    __UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ )
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class _snake_case ( _lowercase ):
    def _lowerCamelCase ( self: str ) -> Union[str, Any]:
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            __UpperCAmelCase : Optional[Any] = Elasticsearch()
            __UpperCAmelCase : Dict = {"acknowledged": True}
            __UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase )
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"] )

            # single query
            __UpperCAmelCase : Dict = "foo"
            __UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            __UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )

            # single query with timeout
            __UpperCAmelCase : int = "foo"
            __UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )

            # batched queries
            __UpperCAmelCase : int = ["foo", "bar", "foobar"]
            __UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            __UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase )
            __UpperCAmelCase : Tuple = [scores[0] for scores in total_scores]
            __UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(__lowerCamelCase ) , 0 )
            self.assertListEqual([1, 1, 1] , __lowerCamelCase )

            # batched queries with timeout
            __UpperCAmelCase : str = ["foo", "bar", "foobar"]
            __UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 )
            __UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
            __UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(__lowerCamelCase ) , 0 )
            self.assertListEqual([1, 1, 1] , __lowerCamelCase )
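# --- Hedged usage sketch (not part of the source file above): FaissIndex from
# `datasets.search` is the class exercised by these tests; it wraps a faiss
# index behind add_vectors()/search(). Requires faiss and numpy to be installed.
import faiss
import numpy as np
from datasets.search import FaissIndex

index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))  # five orthogonal unit vectors
scores, indices = index.search(np.ones(5, dtype=np.float32))  # single 1-D query
print(indices[0])  # id of the best-scoring stored vector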
342
1
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class _snake_case ( _lowercase ):
    def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
        super().__init__(
            __lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
        __UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
        __UpperCAmelCase : int = Text(
            cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )

    def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
        # Build iterable dataset
        if self.streaming:
            __UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            __UpperCAmelCase : Any = None
            __UpperCAmelCase : Any = None
            __UpperCAmelCase : Dict = None
            __UpperCAmelCase : str = None
            self.builder.download_and_prepare(
                download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
            __UpperCAmelCase : Dict = self.builder.as_dataset(
                split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
        return dataset
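# --- Hedged usage sketch (not from the source): the reader above wraps the
# packaged "text" builder, so its public behaviour is assumed equivalent to:
import datasets

# "my_corpus.txt" is a hypothetical file; streaming=False exercises the
# download_and_prepare + as_dataset branch shown above.
ds = datasets.load_dataset("text", data_files="my_corpus.txt", split="train")
print(ds[0]["text"])  # one line of the file per example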
342
import argparse
import struct
import unittest


class _snake_case :
    def __init__( self: Tuple , __lowerCamelCase: bytes ) -> None:
        __UpperCAmelCase : Tuple = data
        # Initialize hash values
        __UpperCAmelCase : Any = [
            0x6_A_0_9_E_6_6_7, 0xB_B_6_7_A_E_8_5, 0x3_C_6_E_F_3_7_2, 0xA_5_4_F_F_5_3_A,
            0x5_1_0_E_5_2_7_F, 0x9_B_0_5_6_8_8_C, 0x1_F_8_3_D_9_A_B, 0x5_B_E_0_C_D_1_9,
        ]
        # Initialize round constants
        __UpperCAmelCase : Dict = [
            0x4_2_8_A_2_F_9_8, 0x7_1_3_7_4_4_9_1, 0xB_5_C_0_F_B_C_F, 0xE_9_B_5_D_B_A_5,
            0x3_9_5_6_C_2_5_B, 0x5_9_F_1_1_1_F_1, 0x9_2_3_F_8_2_A_4, 0xA_B_1_C_5_E_D_5,
            0xD_8_0_7_A_A_9_8, 0x1_2_8_3_5_B_0_1, 0x2_4_3_1_8_5_B_E, 0x5_5_0_C_7_D_C_3,
            0x7_2_B_E_5_D_7_4, 0x8_0_D_E_B_1_F_E, 0x9_B_D_C_0_6_A_7, 0xC_1_9_B_F_1_7_4,
            0xE_4_9_B_6_9_C_1, 0xE_F_B_E_4_7_8_6, 0x0_F_C_1_9_D_C_6, 0x2_4_0_C_A_1_C_C,
            0x2_D_E_9_2_C_6_F, 0x4_A_7_4_8_4_A_A, 0x5_C_B_0_A_9_D_C, 0x7_6_F_9_8_8_D_A,
            0x9_8_3_E_5_1_5_2, 0xA_8_3_1_C_6_6_D, 0xB_0_0_3_2_7_C_8, 0xB_F_5_9_7_F_C_7,
            0xC_6_E_0_0_B_F_3, 0xD_5_A_7_9_1_4_7, 0x0_6_C_A_6_3_5_1, 0x1_4_2_9_2_9_6_7,
            0x2_7_B_7_0_A_8_5, 0x2_E_1_B_2_1_3_8, 0x4_D_2_C_6_D_F_C, 0x5_3_3_8_0_D_1_3,
            0x6_5_0_A_7_3_5_4, 0x7_6_6_A_0_A_B_B, 0x8_1_C_2_C_9_2_E, 0x9_2_7_2_2_C_8_5,
            0xA_2_B_F_E_8_A_1, 0xA_8_1_A_6_6_4_B, 0xC_2_4_B_8_B_7_0, 0xC_7_6_C_5_1_A_3,
            0xD_1_9_2_E_8_1_9, 0xD_6_9_9_0_6_2_4, 0xF_4_0_E_3_5_8_5, 0x1_0_6_A_A_0_7_0,
            0x1_9_A_4_C_1_1_6, 0x1_E_3_7_6_C_0_8, 0x2_7_4_8_7_7_4_C, 0x3_4_B_0_B_C_B_5,
            0x3_9_1_C_0_C_B_3, 0x4_E_D_8_A_A_4_A, 0x5_B_9_C_C_A_4_F, 0x6_8_2_E_6_F_F_3,
            0x7_4_8_F_8_2_E_E, 0x7_8_A_5_6_3_6_F, 0x8_4_C_8_7_8_1_4, 0x8_C_C_7_0_2_0_8,
            0x9_0_B_E_F_F_F_A, 0xA_4_5_0_6_C_E_B, 0xB_E_F_9_A_3_F_7, 0xC_6_7_1_7_8_F_2,
        ]
        __UpperCAmelCase : List[Any] = self.preprocessing(self.data )
        self.final_hash()

    @staticmethod
    def _lowerCamelCase ( __lowerCamelCase: bytes ) -> bytes:
        __UpperCAmelCase : List[str] = B"\x80" + (B"\x00" * (63 - (len(__lowerCamelCase ) + 8) % 64))
        __UpperCAmelCase : int = struct.pack(">Q" , (len(__lowerCamelCase ) * 8) )
        return data + padding + big_endian_integer

    def _lowerCamelCase ( self: Dict ) -> None:
        # Convert into blocks of 64 bytes
        __UpperCAmelCase : Dict = [
            self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            __UpperCAmelCase : List[str] = list(struct.unpack(">16L" , __lowerCamelCase ) )
            # add 48 0-ed integers
            words += [0] * 48
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    __UpperCAmelCase : Union[str, Any] = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    __UpperCAmelCase : str = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    __UpperCAmelCase : Union[str, Any] = (
                        words[index - 16] + sa + words[index - 7] + sa
                    ) % 0x1_0_0_0_0_0_0_0_0
                # Compression
                __UpperCAmelCase : Union[str, Any] = self.ror(__lowerCamelCase , 6 ) ^ self.ror(__lowerCamelCase , 11 ) ^ self.ror(__lowerCamelCase , 25 )
                __UpperCAmelCase : Tuple = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g)
                __UpperCAmelCase : int = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x1_0_0_0_0_0_0_0_0
                __UpperCAmelCase : List[Any] = self.ror(__lowerCamelCase , 2 ) ^ self.ror(__lowerCamelCase , 13 ) ^ self.ror(__lowerCamelCase , 22 )
                __UpperCAmelCase : Dict = (a & b) ^ (a & c) ^ (b & c)
                __UpperCAmelCase : int = (sa + maj) % 0x1_0_0_0_0_0_0_0_0
                __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0x1_0_0_0_0_0_0_0_0),
                    c,
                    b,
                    a,
                    ((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0),
                )
            __UpperCAmelCase : Optional[int] = [a, b, c, d, e, f, g, h]
            # Modify final values
            __UpperCAmelCase : List[str] = [
                ((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
                for index, element in enumerate(self.hashes )
            ]
        __UpperCAmelCase : int = "".join([hex(__lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] )

    def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> int:
        return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)


class _snake_case ( unittest.TestCase ):
    def _lowerCamelCase ( self: List[Any] ) -> None:
        import hashlib

        __UpperCAmelCase : Dict = bytes("Test String" , "utf-8" )
        self.assertEqual(SHAaaa(__lowerCamelCase ).hash , hashlib.shaaaa(__lowerCamelCase ).hexdigest() )


def _UpperCamelCase ( ) -> None:
    import doctest

    doctest.testmod()
    __UpperCAmelCase : Tuple = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file" )
    __UpperCAmelCase : List[Any] = parser.parse_args()
    __UpperCAmelCase : Optional[int] = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb" ) as f:
            __UpperCAmelCase : List[str] = f.read()
    else:
        __UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" )
    print(SHAaaa(snake_case__ ).hash )


if __name__ == "__main__":
    main()
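# --- Hedged check (standalone): the unittest above compares against hashlib,
# whose output is the reference; e.g. for the classic "abc" test vector:
import hashlib

assert (
    hashlib.sha256(b"abc").hexdigest()
    == "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
)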
342
1
from ...configuration_utils import PretrainedConfig


_snake_case = {
    '''google/tapas-base-finetuned-sqa''': (
        '''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
    ),
    '''google/tapas-base-finetuned-wtq''': (
        '''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
    ),
    '''google/tapas-base-finetuned-wikisql-supervised''': (
        '''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
    ),
    '''google/tapas-base-finetuned-tabfact''': (
        '''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
    ),
}


class _snake_case ( _lowercase ):
    lowerCamelCase__: List[Any] = "tapas"

    def __init__( self: Optional[int] , __lowerCamelCase: int=3_05_22 , __lowerCamelCase: str=7_68 , __lowerCamelCase: Union[str, Any]=12 , __lowerCamelCase: Tuple=12 , __lowerCamelCase: int=30_72 , __lowerCamelCase: str="gelu" , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=10_24 , __lowerCamelCase: Optional[Any]=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , __lowerCamelCase: Tuple=0.02 , __lowerCamelCase: str=1e-12 , __lowerCamelCase: Optional[Any]=0 , __lowerCamelCase: Optional[int]=10.0 , __lowerCamelCase: Union[str, Any]=0 , __lowerCamelCase: Optional[int]=1.0 , __lowerCamelCase: List[str]=None , __lowerCamelCase: List[str]=1.0 , __lowerCamelCase: Optional[Any]=False , __lowerCamelCase: Optional[Any]=None , __lowerCamelCase: List[str]=1.0 , __lowerCamelCase: Tuple=1.0 , __lowerCamelCase: Optional[Any]=False , __lowerCamelCase: List[Any]=False , __lowerCamelCase: List[Any]="ratio" , __lowerCamelCase: Union[str, Any]=None , __lowerCamelCase: List[str]=None , __lowerCamelCase: Optional[int]=64 , __lowerCamelCase: List[Any]=32 , __lowerCamelCase: List[Any]=False , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: int=False , __lowerCamelCase: List[Any]=False , __lowerCamelCase: List[Any]=True , __lowerCamelCase: str=False , __lowerCamelCase: List[str]=None , __lowerCamelCase: str=None , **__lowerCamelCase: Optional[Any] , ) -> int:
        super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        __UpperCAmelCase : str = vocab_size
        __UpperCAmelCase : str = hidden_size
        __UpperCAmelCase : Tuple = num_hidden_layers
        __UpperCAmelCase : Optional[Any] = num_attention_heads
        __UpperCAmelCase : Any = hidden_act
        __UpperCAmelCase : int = intermediate_size
        __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
        __UpperCAmelCase : List[str] = attention_probs_dropout_prob
        __UpperCAmelCase : Optional[int] = max_position_embeddings
        __UpperCAmelCase : List[Any] = type_vocab_sizes
        __UpperCAmelCase : Optional[Any] = initializer_range
        __UpperCAmelCase : Dict = layer_norm_eps

        # Fine-tuning task hyperparameters
        __UpperCAmelCase : int = positive_label_weight
        __UpperCAmelCase : Optional[Any] = num_aggregation_labels
        __UpperCAmelCase : int = aggregation_loss_weight
        __UpperCAmelCase : Any = use_answer_as_supervision
        __UpperCAmelCase : str = answer_loss_importance
        __UpperCAmelCase : Union[str, Any] = use_normalized_answer_loss
        __UpperCAmelCase : str = huber_loss_delta
        __UpperCAmelCase : Optional[int] = temperature
        __UpperCAmelCase : Dict = aggregation_temperature
        __UpperCAmelCase : Union[str, Any] = use_gumbel_for_cells
        __UpperCAmelCase : Optional[Any] = use_gumbel_for_aggregation
        __UpperCAmelCase : str = average_approximation_function
        __UpperCAmelCase : Any = cell_selection_preference
        __UpperCAmelCase : Any = answer_loss_cutoff
        __UpperCAmelCase : Optional[int] = max_num_rows
        __UpperCAmelCase : List[Any] = max_num_columns
        __UpperCAmelCase : Dict = average_logits_per_cell
        __UpperCAmelCase : List[str] = select_one_column
        __UpperCAmelCase : Any = allow_empty_column_selection
        __UpperCAmelCase : Tuple = init_cell_selection_weights_to_zero
        __UpperCAmelCase : List[Any] = reset_position_index_per_cell
        __UpperCAmelCase : Optional[int] = disable_per_token_loss

        # Aggregation hyperparameters
        __UpperCAmelCase : Optional[Any] = aggregation_labels
        __UpperCAmelCase : str = no_aggregation_label_index

        if isinstance(self.aggregation_labels , __lowerCamelCase ):
            __UpperCAmelCase : List[Any] = {int(__lowerCamelCase ): v for k, v in aggregation_labels.items()}
342
import numpy as np

import datasets


_snake_case = '''
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''

_snake_case = '''\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
'''

_snake_case = '''
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {\'mahalanobis\': array([0.5])}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
                }
            ) , )

    def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]:
        # convert to numpy arrays
        __UpperCAmelCase : int = np.array(__lowerCamelCase )
        __UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase )

        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError("Expected `X` to be a 2D vector" )
        if len(reference_distribution.shape ) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector" )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )

        # Get mahalanobis distance for each prediction
        __UpperCAmelCase : str = X - np.mean(__lowerCamelCase )
        __UpperCAmelCase : Union[str, Any] = np.cov(reference_distribution.T )
        try:
            __UpperCAmelCase : int = np.linalg.inv(__lowerCamelCase )
        except np.linalg.LinAlgError:
            __UpperCAmelCase : Optional[int] = np.linalg.pinv(__lowerCamelCase )
        __UpperCAmelCase : Optional[Any] = np.dot(__lowerCamelCase , __lowerCamelCase )
        __UpperCAmelCase : Optional[int] = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()

        return {"mahalanobis": mahal_dist}
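# --- Hedged sketch (plain numpy) of what compute() above returns: the squared
# Mahalanobis distance of each row of X from the reference distribution.
import numpy as np

X = np.array([[0, 1]])
ref = np.array([[0, 1], [1, 0]])
delta = X - np.mean(ref)  # mirrors the scalar mean used above
inv_cov = np.linalg.pinv(np.cov(ref.T))  # pinv, since this covariance is singular
print((delta @ inv_cov @ delta.T).diagonal())  # [0.5], matching the docstring example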
342
1
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


_snake_case = logging.get_logger(__name__)


def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__ ) -> Tuple[int, int]:
    def constraint_to_multiple_of(snake_case__, snake_case__, snake_case__=0, snake_case__=None ):
        __UpperCAmelCase : Tuple = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            __UpperCAmelCase : Tuple = math.floor(val / multiple ) * multiple
        if x < min_val:
            __UpperCAmelCase : List[str] = math.ceil(val / multiple ) * multiple
        return x

    __UpperCAmelCase : Tuple = (output_size, output_size) if isinstance(snake_case__, snake_case__ ) else output_size
    __UpperCAmelCase , __UpperCAmelCase : int = get_image_size(snake_case__ )
    __UpperCAmelCase , __UpperCAmelCase : List[Any] = output_size

    # determine new height and width
    __UpperCAmelCase : int = output_height / input_height
    __UpperCAmelCase : Union[str, Any] = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            __UpperCAmelCase : int = scale_width
        else:
            # fit height
            __UpperCAmelCase : Optional[int] = scale_height

    __UpperCAmelCase : Dict = constraint_to_multiple_of(scale_height * input_height, multiple=snake_case__ )
    __UpperCAmelCase : Union[str, Any] = constraint_to_multiple_of(scale_width * input_width, multiple=snake_case__ )

    return (new_height, new_width)


class _snake_case ( _lowercase ):
    lowerCamelCase__: int = ["pixel_values"]

    def __init__( self: Tuple , __lowerCamelCase: bool = True , __lowerCamelCase: Dict[str, int] = None , __lowerCamelCase: PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase: bool = False , __lowerCamelCase: int = 1 , __lowerCamelCase: bool = True , __lowerCamelCase: Union[int, float] = 1 / 2_55 , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[float, List[float]]] = None , __lowerCamelCase: Optional[Union[float, List[float]]] = None , **__lowerCamelCase: List[Any] , ) -> None:
        super().__init__(**__lowerCamelCase )
        __UpperCAmelCase : Tuple = size if size is not None else {"height": 3_84, "width": 3_84}
        __UpperCAmelCase : Optional[Any] = get_size_dict(__lowerCamelCase )
        __UpperCAmelCase : List[Any] = do_resize
        __UpperCAmelCase : Dict = size
        __UpperCAmelCase : Dict = keep_aspect_ratio
        __UpperCAmelCase : Dict = ensure_multiple_of
        __UpperCAmelCase : str = resample
        __UpperCAmelCase : Any = do_rescale
        __UpperCAmelCase : Optional[Any] = rescale_factor
        __UpperCAmelCase : Optional[Any] = do_normalize
        __UpperCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __UpperCAmelCase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def _lowerCamelCase ( self: Dict , __lowerCamelCase: np.ndarray , __lowerCamelCase: Dict[str, int] , __lowerCamelCase: bool = False , __lowerCamelCase: int = 1 , __lowerCamelCase: PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase: Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase: str , ) -> np.ndarray:
        __UpperCAmelCase : Any = get_size_dict(__lowerCamelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        __UpperCAmelCase : Optional[Any] = get_resize_output_image_size(
            __lowerCamelCase , output_size=(size["height"], size["width"]) , keep_aspect_ratio=__lowerCamelCase , multiple=__lowerCamelCase , )
        return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )

    def _lowerCamelCase ( self: Dict , __lowerCamelCase: np.ndarray , __lowerCamelCase: Union[int, float] , __lowerCamelCase: Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase: str , ) -> Any:
        return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )

    def _lowerCamelCase ( self: int , __lowerCamelCase: np.ndarray , __lowerCamelCase: Union[float, List[float]] , __lowerCamelCase: Union[float, List[float]] , __lowerCamelCase: Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase: Optional[Any] , ) -> np.ndarray:
        return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )

    def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: ImageInput , __lowerCamelCase: bool = None , __lowerCamelCase: int = None , __lowerCamelCase: bool = None , __lowerCamelCase: int = None , __lowerCamelCase: PILImageResampling = None , __lowerCamelCase: bool = None , __lowerCamelCase: float = None , __lowerCamelCase: bool = None , __lowerCamelCase: Optional[Union[float, List[float]]] = None , __lowerCamelCase: Optional[Union[float, List[float]]] = None , __lowerCamelCase: Optional[Union[str, TensorType]] = None , __lowerCamelCase: ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase: Optional[int] , ) -> PIL.Image.Image:
        __UpperCAmelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
        __UpperCAmelCase : Optional[Any] = size if size is not None else self.size
        __UpperCAmelCase : Optional[Any] = get_size_dict(__lowerCamelCase )
        __UpperCAmelCase : Union[str, Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        __UpperCAmelCase : Dict = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        __UpperCAmelCase : Optional[int] = resample if resample is not None else self.resample
        __UpperCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
        __UpperCAmelCase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        __UpperCAmelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
        __UpperCAmelCase : List[Any] = image_mean if image_mean is not None else self.image_mean
        __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std
        __UpperCAmelCase : Any = make_list_of_images(__lowerCamelCase )
        if not valid_images(__lowerCamelCase ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )

        # All transformations expect numpy arrays.
        __UpperCAmelCase : Union[str, Any] = [to_numpy_array(__lowerCamelCase ) for image in images]
        if do_resize:
            __UpperCAmelCase : List[Any] = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images]
        if do_rescale:
            __UpperCAmelCase : Optional[int] = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images]
        if do_normalize:
            __UpperCAmelCase : Optional[Any] = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images]
        __UpperCAmelCase : str = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
        __UpperCAmelCase : List[str] = {"pixel_values": images}
        return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )

    def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[Tuple] = None ) -> Optional[Any]:
        __UpperCAmelCase : List[str] = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(__lowerCamelCase ) != len(__lowerCamelCase ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(__lowerCamelCase ):
                __UpperCAmelCase : List[str] = target_sizes.numpy()
            __UpperCAmelCase : Tuple = []
            for idx in range(len(__lowerCamelCase ) ):
                __UpperCAmelCase : List[str] = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__lowerCamelCase )
                __UpperCAmelCase : Optional[int] = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(__lowerCamelCase )
        else:
            __UpperCAmelCase : Dict = logits.argmax(dim=1 )
            __UpperCAmelCase : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]

        return semantic_segmentation
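# --- Hedged check of the resizing arithmetic above: a 480x640 image targeted at
# 384x384 with keep_aspect_ratio picks the scale closest to 1 (here 384/480 = 0.8)
# and rounds each side to a multiple of `ensure_multiple_of`:
def _to_multiple(val, multiple=32):  # hypothetical helper mirroring constraint_to_multiple_of
    return round(val / multiple) * multiple

print(_to_multiple(0.8 * 480), _to_multiple(0.8 * 640))  # 384 512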
342
import unittest

import numpy as np

from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.distilbert.modeling_flax_distilbert import (
        FlaxDistilBertForMaskedLM,
        FlaxDistilBertForMultipleChoice,
        FlaxDistilBertForQuestionAnswering,
        FlaxDistilBertForSequenceClassification,
        FlaxDistilBertForTokenClassification,
        FlaxDistilBertModel,
    )


class _snake_case ( unittest.TestCase ):
    def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str:
        __UpperCAmelCase : Union[str, Any] = parent
        __UpperCAmelCase : Optional[int] = batch_size
        __UpperCAmelCase : Optional[Any] = seq_length
        __UpperCAmelCase : Tuple = is_training
        __UpperCAmelCase : List[str] = use_attention_mask
        __UpperCAmelCase : Dict = use_token_type_ids
        __UpperCAmelCase : Optional[int] = use_labels
        __UpperCAmelCase : Optional[Any] = vocab_size
        __UpperCAmelCase : Union[str, Any] = hidden_size
        __UpperCAmelCase : Dict = num_hidden_layers
        __UpperCAmelCase : Dict = num_attention_heads
        __UpperCAmelCase : Tuple = intermediate_size
        __UpperCAmelCase : Union[str, Any] = hidden_act
        __UpperCAmelCase : Tuple = hidden_dropout_prob
        __UpperCAmelCase : str = attention_probs_dropout_prob
        __UpperCAmelCase : Optional[Any] = max_position_embeddings
        __UpperCAmelCase : Optional[int] = type_vocab_size
        __UpperCAmelCase : str = type_sequence_label_size
        __UpperCAmelCase : Tuple = initializer_range
        __UpperCAmelCase : str = num_choices

    def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
        __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCAmelCase : str = None
        if self.use_attention_mask:
            __UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
        __UpperCAmelCase : Any = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , )
        return config, input_ids, attention_mask

    def _lowerCamelCase ( self: str ) -> Any:
        __UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
        __UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class _snake_case ( _lowercase , unittest.TestCase ):
    lowerCamelCase__: str = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def _lowerCamelCase ( self: List[Any] ) -> Dict:
        __UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self )

    @slow
    def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
        for model_class_name in self.all_model_classes:
            __UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" )
            __UpperCAmelCase : Dict = model(np.ones((1, 1) ) )
            self.assertIsNotNone(__lowerCamelCase )


@require_flax
class _snake_case ( unittest.TestCase ):
    @slow
    def _lowerCamelCase ( self: int ) -> List[Any]:
        __UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
        __UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        __UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        __UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
        __UpperCAmelCase : str = (1, 11, 7_68)
        self.assertEqual(output.shape , __lowerCamelCase )
        __UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
342
1
from PIL import Image


def _UpperCamelCase ( snake_case__, snake_case__ ) -> Image:
    def brightness(snake_case__ ) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
    return img.point(snake_case__ )


if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change brightness to 100
        _snake_case = change_brightness(img, 100)
        brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
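# --- Hedged sketch (standalone): Image.point applies a per-pixel lookup table,
# so the brightness shift above behaves like the lambda below.
from PIL import Image

tiny = Image.new("L", (2, 2), color=100)  # hypothetical 2x2 grayscale image
brighter = tiny.point(lambda c: c + 50)  # every pixel value shifted by the level
print(list(brighter.getdata()))  # [150, 150, 150, 150]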
342
import argparse
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration


_snake_case = [
    # tf -> hf
    ('''/''', '''.'''),
    ('''layer_''', '''layers.'''),
    ('''kernel''', '''weight'''),
    ('''beta''', '''bias'''),
    ('''gamma''', '''weight'''),
    ('''pegasus''', '''model'''),
]
_snake_case = [
    ('''.output.dense''', '''.fc2'''),
    ('''intermediate.LayerNorm''', '''final_layer_norm'''),
    ('''intermediate.dense''', '''fc1'''),
]
_snake_case = (
    INIT_COMMON
    + [
        ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
        ('''attention.output.dense''', '''self_attn.out_proj'''),
        ('''attention.self''', '''self_attn'''),
        ('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
        ('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
        ('''attention.encdec''', '''encoder_attn'''),
        ('''key''', '''k_proj'''),
        ('''value''', '''v_proj'''),
        ('''query''', '''q_proj'''),
        ('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
    ]
    + END_COMMON
)
_snake_case = (
    INIT_COMMON
    + [
        ('''embeddings.word_embeddings''', '''shared.weight'''),
        ('''embeddings.position_embeddings''', '''embed_positions.weight'''),
        ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
        ('''attention.output.dense''', '''self_attn.output'''),
        ('''attention.self''', '''self_attn.self'''),
        ('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
    ]
    + END_COMMON
)
_snake_case = [
    '''encdec/key/bias''',
    '''encdec/query/bias''',
    '''encdec/value/bias''',
    '''self/key/bias''',
    '''self/query/bias''',
    '''self/value/bias''',
    '''encdec_output/dense/bias''',
    '''attention/output/dense/bias''',
]


def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
    for tf_name, hf_name in patterns:
        __UpperCAmelCase : Optional[int] = k.replace(snake_case__, snake_case__ )
    return k


def _UpperCamelCase ( snake_case__, snake_case__ ) -> BigBirdPegasusForConditionalGeneration:
    __UpperCAmelCase : Dict = BigBirdPegasusConfig(**snake_case__ )
    __UpperCAmelCase : Dict = BigBirdPegasusForConditionalGeneration(snake_case__ )
    __UpperCAmelCase : Optional[Any] = torch_model.state_dict()
    __UpperCAmelCase : Optional[int] = {}

    # separating decoder weights
    __UpperCAmelCase : List[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
    __UpperCAmelCase : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ):
        __UpperCAmelCase : Optional[int] = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
        if any(snake_case__ ):
            continue
        __UpperCAmelCase : List[str] = DECODER_PATTERNS
        __UpperCAmelCase : str = rename_state_dict_key(snake_case__, snake_case__ )
        if new_k not in state_dict:
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
            __UpperCAmelCase : Optional[int] = v.T
        __UpperCAmelCase : str = torch.from_numpy(snake_case__ )
        assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ):
        __UpperCAmelCase : int = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
        if any(snake_case__ ):
            continue
        __UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS
        __UpperCAmelCase : Optional[int] = rename_state_dict_key(snake_case__, snake_case__ )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
            __UpperCAmelCase : List[Any] = v.T
        __UpperCAmelCase : List[str] = torch.from_numpy(snake_case__ )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''

    __UpperCAmelCase : List[Any] = mapping["model.embed_positions.weight"]
    __UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" )
    __UpperCAmelCase , __UpperCAmelCase : Any = torch_model.load_state_dict(snake_case__, strict=snake_case__ )
    __UpperCAmelCase : str = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], f'''no matches found for the following tf keys {extra}'''
    return torch_model


def _UpperCamelCase ( snake_case__ ) -> Dict:
    __UpperCAmelCase : Tuple = tf.train.list_variables(snake_case__ )
    __UpperCAmelCase : List[str] = {}
    __UpperCAmelCase : str = ["global_step"]
    for name, shape in tqdm(snake_case__, desc="converting tf checkpoint to dict" ):
        __UpperCAmelCase : Tuple = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        __UpperCAmelCase : Optional[Any] = tf.train.load_variable(snake_case__, snake_case__ )
        __UpperCAmelCase : Tuple = array
    return tf_weights


def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict:
    __UpperCAmelCase : str = get_tf_weights_as_numpy(snake_case__ )
    __UpperCAmelCase : List[Any] = convert_bigbird_pegasus(snake_case__, snake_case__ )
    torch_model.save_pretrained(snake_case__ )


if __name__ == "__main__":
    _snake_case = argparse.ArgumentParser()
    parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
    parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    _snake_case = parser.parse_args()
    _snake_case = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
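# --- Hedged usage sketch: the script above is meant to be driven from the CLI
# with the two flags it defines (script name and paths below are hypothetical):
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/tf_checkpoint \
#       --save_dir /path/to/pytorch_output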
342
1
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


_snake_case = datasets.utils.logging.get_logger(__name__)

_snake_case = ['''names''', '''prefix''']
_snake_case = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_snake_case = ['''encoding_errors''', '''on_bad_lines''']
_snake_case = ['''date_format''']


@dataclass
class _snake_case ( datasets.BuilderConfig ):
    lowerCamelCase__: str = ","
    lowerCamelCase__: Optional[str] = None
    lowerCamelCase__: Optional[Union[int, List[int], str]] = "infer"
    lowerCamelCase__: Optional[List[str]] = None
    lowerCamelCase__: Optional[List[str]] = None
    lowerCamelCase__: Optional[Union[int, str, List[int], List[str]]] = None
    lowerCamelCase__: Optional[Union[List[int], List[str]]] = None
    lowerCamelCase__: Optional[str] = None
    lowerCamelCase__: bool = True
    lowerCamelCase__: Optional[Literal["c", "python", "pyarrow"]] = None
    lowerCamelCase__: Dict[Union[int, str], Callable[[Any], Any]] = None
    lowerCamelCase__: Optional[list] = None
    lowerCamelCase__: Optional[list] = None
    lowerCamelCase__: bool = False
    lowerCamelCase__: Optional[Union[int, List[int]]] = None
    lowerCamelCase__: Optional[int] = None
    lowerCamelCase__: Optional[Union[str, List[str]]] = None
    lowerCamelCase__: bool = True
    lowerCamelCase__: bool = True
    lowerCamelCase__: bool = False
    lowerCamelCase__: bool = True
    lowerCamelCase__: Optional[str] = None
    lowerCamelCase__: str = "."
    lowerCamelCase__: Optional[str] = None
    lowerCamelCase__: str = '"'
    lowerCamelCase__: int = 0
    lowerCamelCase__: Optional[str] = None
    lowerCamelCase__: Optional[str] = None
    lowerCamelCase__: Optional[str] = None
    lowerCamelCase__: Optional[str] = None
    lowerCamelCase__: bool = True
    lowerCamelCase__: bool = True
    lowerCamelCase__: int = 0
    lowerCamelCase__: bool = True
    lowerCamelCase__: bool = False
    lowerCamelCase__: Optional[str] = None
    lowerCamelCase__: int = 1_00_00
    lowerCamelCase__: Optional[datasets.Features] = None
    lowerCamelCase__: Optional[str] = "strict"
    lowerCamelCase__: Literal["error", "warn", "skip"] = "error"
    lowerCamelCase__: Optional[str] = None

    def _lowerCamelCase ( self: List[str] ) -> str:
        if self.delimiter is not None:
            __UpperCAmelCase : Any = self.delimiter
        if self.column_names is not None:
            __UpperCAmelCase : int = self.column_names

    @property
    def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
        __UpperCAmelCase : Any = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __lowerCamelCase ):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class _snake_case ( datasets.ArrowBasedBuilder ):
    lowerCamelCase__: Optional[int] = CsvConfig

    def _lowerCamelCase ( self: Any ) -> Union[str, Any]:
        return datasets.DatasetInfo(features=self.config.features )

    def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> List[str]:
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        __UpperCAmelCase : List[str] = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(__lowerCamelCase , (str, list, tuple) ):
            __UpperCAmelCase : int = data_files
            if isinstance(__lowerCamelCase , __lowerCamelCase ):
                __UpperCAmelCase : Union[str, Any] = [files]
            __UpperCAmelCase : int = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        __UpperCAmelCase : Union[str, Any] = []
        for split_name, files in data_files.items():
            if isinstance(__lowerCamelCase , __lowerCamelCase ):
                __UpperCAmelCase : str = [files]
            __UpperCAmelCase : List[Any] = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
            splits.append(datasets.SplitGenerator(name=__lowerCamelCase , gen_kwargs={"files": files} ) )
        return splits

    def _lowerCamelCase ( self: Dict , __lowerCamelCase: pa.Table ) -> pa.Table:
        if self.config.features is not None:
            __UpperCAmelCase : List[Any] = self.config.features.arrow_schema
            if all(not require_storage_cast(__lowerCamelCase ) for feature in self.config.features.values() ):
                # cheaper cast
                __UpperCAmelCase : Union[str, Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__lowerCamelCase )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                __UpperCAmelCase : Optional[int] = table_cast(__lowerCamelCase , __lowerCamelCase )
        return pa_table

    def _lowerCamelCase ( self: Tuple , __lowerCamelCase: str ) -> Tuple:
        __UpperCAmelCase : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        __UpperCAmelCase : int = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(__lowerCamelCase ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCamelCase ) ):
            __UpperCAmelCase : str = pd.read_csv(__lowerCamelCase , iterator=__lowerCamelCase , dtype=__lowerCamelCase , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(__lowerCamelCase ):
                    __UpperCAmelCase : str = pa.Table.from_pandas(__lowerCamelCase )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(__lowerCamelCase )
            except ValueError as e:
                logger.error(f'''Failed to read file \'{file}\' with error {type(__lowerCamelCase )}: {e}''' )
                raise
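# --- Hedged usage sketch: this builder backs the packaged "csv" loader, and the
# CsvConfig fields above are assumed to surface as load_dataset keyword arguments:
import datasets

# "data.csv", sep and skiprows are illustrative; they map onto the config fields above
ds = datasets.load_dataset("csv", data_files="data.csv", sep=";", skiprows=1, split="train")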
342
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class _snake_case ( _lowercase ):
    lowerCamelCase__: Any = ["image_processor", "tokenizer"]
    lowerCamelCase__: Optional[Any] = "BlipImageProcessor"
    lowerCamelCase__: Optional[int] = "AutoTokenizer"

    def __init__( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict:
        super().__init__(__lowerCamelCase , __lowerCamelCase )
        # add QFormer tokenizer
        __UpperCAmelCase : Dict = qformer_tokenizer

    def __call__( self: Any , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Dict , ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text." )
        __UpperCAmelCase : str = BatchFeature()
        if text is not None:
            __UpperCAmelCase : Any = self.tokenizer(
                text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
            encoding.update(__lowerCamelCase )
            __UpperCAmelCase : Dict = self.qformer_tokenizer(
                text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
            __UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" )
            __UpperCAmelCase : Optional[int] = qformer_text_encoding.pop("attention_mask" )
        if images is not None:
            __UpperCAmelCase : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
            encoding.update(__lowerCamelCase )
        return encoding

    def _lowerCamelCase ( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Optional[Any]:
        return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )

    def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Any , **__lowerCamelCase: Dict ) -> Tuple:
        return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def _lowerCamelCase ( self: List[str] ) -> Tuple:
        __UpperCAmelCase : str = self.tokenizer.model_input_names
        __UpperCAmelCase : Dict = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> str:
        if os.path.isfile(__lowerCamelCase ):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
        __UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
        self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
        return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )

    @classmethod
    def _lowerCamelCase ( cls: Tuple , __lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]:
        __UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
        __UpperCAmelCase : List[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
        args.append(__lowerCamelCase )
        return cls(*__lowerCamelCase )
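# --- Hedged usage sketch: the processor above bundles an image processor, a main
# tokenizer and the QFormer tokenizer; assuming it is InstructBlipProcessor:
#
#   from transformers import InstructBlipProcessor
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="Describe the picture.", return_tensors="pt")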
342
1
from math import factorial


def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials" )
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers" )
    if not isinstance(snake_case__, snake_case__ ) or not isinstance(snake_case__, snake_case__ ):
        raise ValueError("the function is defined for non-negative integers" )
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0" )
    __UpperCAmelCase : List[Any] = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    __UpperCAmelCase : Dict = float(factorial(snake_case__ ) )
    coefficient /= factorial(snake_case__ ) * factorial(trials - successes )
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print('''Probability of 2 successes out of 4 trials''')
    print('''with probability of 0.75 is:''', end=''' ''')
    print(binomial_distribution(2, 4, 0.7_5))
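# --- Hedged check (standalone): for 2 successes in 4 trials with p = 0.75,
# P = C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375
from math import comb

print(comb(4, 2) * 0.75**2 * 0.25**2)  # 0.2109375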
342
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _snake_case = logging.get_logger(__name__) _snake_case = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } _snake_case = { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } _snake_case = {'''facebook/blenderbot-3B''': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _UpperCamelCase ( ) -> Dict: __UpperCAmelCase : Tuple = ( list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) ) ) __UpperCAmelCase : str = bs[:] __UpperCAmelCase : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case__ ) cs.append(2**8 + n ) n += 1 __UpperCAmelCase : Optional[Any] = [chr(snake_case__ ) for n in cs] return dict(zip(snake_case__, snake_case__ ) ) def _UpperCamelCase ( snake_case__ ) -> Any: __UpperCAmelCase : List[Any] = set() __UpperCAmelCase : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase : Union[str, Any] = char return pairs class _snake_case ( _lowercase ): lowerCamelCase__: str = VOCAB_FILES_NAMES lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__: Dict = ["input_ids", "attention_mask"] def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]: __UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token __UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token __UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token __UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token __UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token __UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else 
pad_token # Mask token behave like a normal word, i.e. include the space before it __UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: __UpperCAmelCase : List[Any] = json.load(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()} __UpperCAmelCase : Dict = errors # how to handle errors in decoding __UpperCAmelCase : Optional[int] = bytes_to_unicode() __UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: __UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1] __UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges] __UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Tuple = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _lowerCamelCase ( self: Dict ) -> Any: return len(self.encoder ) def _lowerCamelCase ( self: Optional[Any] ) -> List[str]: return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCamelCase ( self: int , __lowerCamelCase: List[Any] ) -> Union[str, Any]: if token in self.cache: return self.cache[token] __UpperCAmelCase : List[Any] = tuple(__lowerCamelCase ) __UpperCAmelCase : Dict = get_pairs(__lowerCamelCase ) if not pairs: return token while True: __UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : str = 0 while i < len(__lowerCamelCase ): try: __UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase : Union[str, Any] = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase : List[Any] = tuple(__lowerCamelCase ) __UpperCAmelCase : str = new_word if len(__lowerCamelCase ) == 1: break else: __UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = word return word def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict: __UpperCAmelCase : Any = [] for token in re.findall(self.pat , __lowerCamelCase ): __UpperCAmelCase : int = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our 
case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict: return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]: return self.decoder.get(__lowerCamelCase ) def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int: __UpperCAmelCase : Dict = "".join(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __UpperCAmelCase : Dict = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) __UpperCAmelCase : Optional[Any] = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) __UpperCAmelCase : Optional[Any] = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]: __UpperCAmelCase : int = [self.sep_token_id] __UpperCAmelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]: __UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): __UpperCAmelCase : Optional[Any] = " " + text return (text, kwargs) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]: return token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]: __UpperCAmelCase : Tuple = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix 
as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: __UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
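# Hedged usage sketch for the tokenizer above. Judging by the Conversation
# helper and the appended eos token, this is the slow Blenderbot tokenizer;
# assuming it is exposed as transformers.BlenderbotTokenizer, a minimal round
# trip looks like this (the checkpoint name is an assumption, not from this file):
from transformers import BlenderbotTokenizer

blenderbot_tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
example_ids = blenderbot_tokenizer(" Hello, how are you?").input_ids  # byte-level BPE ids plus eos
print(blenderbot_tokenizer.decode(example_ids, skip_special_tokens=True))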
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''vocab_file''': '''vocab.json''', '''tokenizer_config_file''': '''tokenizer_config.json''', '''merges_file''': '''merges.txt''', } _snake_case = { '''vocab_file''': { '''facebook/s2t-wav2vec2-large-en-de''': ( '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json''' ), }, '''tokenizer_config_file''': { '''facebook/s2t-wav2vec2-large-en-de''': ( '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json''' ), }, '''merges_file''': { '''facebook/s2t-wav2vec2-large-en-de''': ( '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt''' ), }, } _snake_case = '''</w>''' _snake_case = '''@@ ''' def _UpperCamelCase ( snake_case__ ) -> Optional[Any]: __UpperCAmelCase : Any = set() __UpperCAmelCase : Dict = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase : Optional[int] = char return pairs # Speech2Text2 has no max input length _snake_case = {'''facebook/s2t-wav2vec2-large-en-de''': 1024} class _snake_case ( _lowercase ): lowerCamelCase__: int = VOCAB_FILES_NAMES lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__: Dict = ["input_ids", "attention_mask"] def __init__( self: int , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: Dict="<unk>" , __lowerCamelCase: List[str]=False , __lowerCamelCase: Union[str, Any]=None , **__lowerCamelCase: int , ) -> Dict: super().__init__( unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , ) __UpperCAmelCase : Optional[Any] = do_lower_case with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: __UpperCAmelCase : List[str] = json.load(__lowerCamelCase ) __UpperCAmelCase : int = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f'''No merges files provided. 
{self.__class__.__name__} can only be used for decoding.''' ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : List[str] = None else: with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: __UpperCAmelCase : Optional[Any] = merges_handle.read().split("\n" )[:-1] __UpperCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges] __UpperCAmelCase : Union[str, Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) __UpperCAmelCase : Optional[Any] = {} @property def _lowerCamelCase ( self: int ) -> int: return len(self.decoder ) def _lowerCamelCase ( self: Any ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCamelCase ( self: Tuple , __lowerCamelCase: int ) -> Tuple: __UpperCAmelCase : List[Any] = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] __UpperCAmelCase : List[Any] = get_pairs(__lowerCamelCase ) if not pairs: return token while True: __UpperCAmelCase : List[str] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram __UpperCAmelCase : int = [] __UpperCAmelCase : Union[str, Any] = 0 while i < len(__lowerCamelCase ): try: __UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase : Tuple = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase : Any = tuple(__lowerCamelCase ) __UpperCAmelCase : List[Any] = new_word if len(__lowerCamelCase ) == 1: break else: __UpperCAmelCase : str = get_pairs(__lowerCamelCase ) __UpperCAmelCase : List[Any] = " ".join(__lowerCamelCase ) if word == "\n " + BPE_TOKEN_MERGES: __UpperCAmelCase : str = "\n" + BPE_TOKEN_MERGES if word.endswith(__lowerCamelCase ): __UpperCAmelCase : Dict = word.replace(__lowerCamelCase , "" ) __UpperCAmelCase : Dict = word.replace(" " , __lowerCamelCase ) __UpperCAmelCase : Dict = word return word def _lowerCamelCase ( self: str , __lowerCamelCase: int ) -> Dict: if self.bpe_ranks is None: raise ValueError( "This tokenizer was instantiated without a `merges.txt` file, so" " that it can only be used for decoding, not for encoding." "Make sure to provide `merges.txt` file at instantiation to enable " "encoding." 
) if self.do_lower_case: __UpperCAmelCase : str = text.lower() __UpperCAmelCase : List[Any] = text.split() __UpperCAmelCase : Tuple = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(" " ) ) ) return split_tokens def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: str ) -> int: return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int ) -> str: __UpperCAmelCase : Any = self.decoder.get(__lowerCamelCase , self.unk_token ) return result def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[str] ) -> str: __UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase ) # make sure @@ tokens are concatenated __UpperCAmelCase : Union[str, Any] = "".join(string.split(__lowerCamelCase ) ) return string def _lowerCamelCase ( self: Tuple , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __UpperCAmelCase : Optional[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) __UpperCAmelCase : Optional[int] = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) __UpperCAmelCase : Union[str, Any] = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return (vocab_file, merges_file)
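# Hedged usage sketch: assuming the class above is transformers'
# Speech2Text2Tokenizer, decoding re-joins BPE pieces by stripping the "@@ "
# continuation marker; the checkpoint below comes from the URL maps above and
# ships a merges.txt, so encoding works as well.
from transformers import Speech2Text2Tokenizer

s2t_tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
token_ids = s2t_tokenizer("hello world").input_ids
print(s2t_tokenizer.decode(token_ids))  # BPE pieces merged back into plain words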
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _snake_case ( _lowercase , unittest.TestCase ): lowerCamelCase__: List[Any] = CanineTokenizer lowerCamelCase__: Optional[int] = False def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: super().setUp() __UpperCAmelCase : Tuple = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]: return CanineTokenizer.from_pretrained("google/canine-s" ) def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer: __UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) __UpperCAmelCase : Optional[int] = 10_24 return tokenizer @require_torch def _lowerCamelCase ( self: List[str] ) -> int: __UpperCAmelCase : Union[str, Any] = self.canine_tokenizer __UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."] # fmt: off __UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on __UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def _lowerCamelCase ( self: Optional[Any] ) -> Tuple: __UpperCAmelCase : Optional[Any] = self.canine_tokenizer __UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."] __UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("input_ids" , __lowerCamelCase ) self.assertIn("attention_mask" , __lowerCamelCase ) self.assertIn("token_type_ids" , __lowerCamelCase ) @require_torch def _lowerCamelCase ( self: Any ) -> List[str]: __UpperCAmelCase : Optional[Any] = self.canine_tokenizer __UpperCAmelCase : int = [ "What's the weater?", "It's about 25 degrees.", ] __UpperCAmelCase : List[Any] = tokenizer( text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) def _lowerCamelCase ( self: List[Any] ) -> Tuple: # safety check on max_len default value so we are sure the test works __UpperCAmelCase : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __UpperCAmelCase : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __UpperCAmelCase : int = tempfile.mkdtemp() __UpperCAmelCase : List[Any] = 
" He is very happy, UNwant\u00E9d,running" __UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) tokenizer.save_pretrained(__lowerCamelCase ) __UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase ) __UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) shutil.rmtree(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __UpperCAmelCase : List[Any] = tempfile.mkdtemp() __UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running" __UpperCAmelCase : str = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: __UpperCAmelCase : Tuple = chr(0xE_0_0_7 ) additional_special_tokens.append(__lowerCamelCase ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) __UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) tokenizer.save_pretrained(__lowerCamelCase ) __UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__lowerCamelCase ) def _lowerCamelCase ( self: str ) -> Optional[int]: __UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_clean_sequence(__lowerCamelCase ) # a special token for Canine can be defined as follows: __UpperCAmelCase : int = 0xE_0_0_5 __UpperCAmelCase : Tuple = chr(__lowerCamelCase ) tokenizer.add_special_tokens({"cls_token": special_token} ) __UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(len(__lowerCamelCase ) , 1 ) __UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) __UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) __UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(__lowerCamelCase , input_encoded + special_token_id ) __UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) self.assertTrue(special_token not in decoded ) def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]: __UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase : Optional[int] = chr(0xE_0_0_5 ) __UpperCAmelCase : List[str] = chr(0xE_0_0_6 
) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} ) __UpperCAmelCase : Tuple = tokenizer.tokenize(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase ) self.assertEqual(len(__lowerCamelCase ) , 1 ) self.assertEqual(len(__lowerCamelCase ) , 1 ) self.assertEqual(token_a[0] , __lowerCamelCase ) self.assertEqual(token_a[0] , __lowerCamelCase ) @require_tokenizers def _lowerCamelCase ( self: str ) -> Union[str, Any]: __UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: __UpperCAmelCase : Union[str, Any] = 0xE_0_0_6 __UpperCAmelCase : int = chr(__lowerCamelCase ) __UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase ) tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__lowerCamelCase ) tokenizer.from_pretrained(__lowerCamelCase ) def _lowerCamelCase ( self: Dict ) -> List[str]: __UpperCAmelCase : str = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__lowerCamelCase ) with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file: __UpperCAmelCase : Tuple = json.load(__lowerCamelCase ) with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file: __UpperCAmelCase : Optional[int] = json.load(__lowerCamelCase ) # a special token for Canine can be defined as follows: __UpperCAmelCase : Any = 0xE_0_0_6 __UpperCAmelCase : Union[str, Any] = chr(__lowerCamelCase ) __UpperCAmelCase : Dict = [new_token_a] __UpperCAmelCase : int = [new_token_a] with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(__lowerCamelCase , __lowerCamelCase ) with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(__lowerCamelCase , __lowerCamelCase ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 ) self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) __UpperCAmelCase : List[Any] = 0xE_0_0_7 __UpperCAmelCase : List[Any] = chr(__lowerCamelCase ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __UpperCAmelCase : str = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )] __UpperCAmelCase : Dict = tokenizer_class.from_pretrained( __lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 ) self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase : int = "hello world" if self.space_between_special_tokens: __UpperCAmelCase : Any = "[CLS] hello world [SEP]" else: __UpperCAmelCase : Union[str, Any] = input __UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) __UpperCAmelCase : Any = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__lowerCamelCase , [output, output.lower()] ) def _lowerCamelCase ( self: Dict ) -> Any: __UpperCAmelCase : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase : List[str] = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] __UpperCAmelCase : List[str] = "a" __UpperCAmelCase : Any = ord(__lowerCamelCase ) for attr in attributes_list: setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase ) self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase ) setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase ) self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase ) setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] ) self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] ) self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] ) __UpperCAmelCase : Tuple = 0xE_0_0_6 __UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase ) setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] ) self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] ) self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , 
[additional_special_token_id] ) def _lowerCamelCase ( self: str ) -> Union[str, Any]: pass def _lowerCamelCase ( self: Any ) -> Any: pass def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple: pass def _lowerCamelCase ( self: Optional[int] ) -> Any: pass def _lowerCamelCase ( self: List[str] ) -> str: pass def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]: pass def _lowerCamelCase ( self: Optional[Any] ) -> Tuple: pass def _lowerCamelCase ( self: str ) -> Tuple: pass
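# Character-level mapping the tests above exercise, shown concretely: CANINE
# uses Unicode code points directly as input ids, with 0xE000 (57344) as [CLS]
# and 0xE001 (57345) as [SEP], matching the expected ids in the first test.
from transformers import CanineTokenizer

canine_tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
life_ids = canine_tokenizer("Life").input_ids
assert life_ids == [0xE000, ord("L"), ord("i"), ord("f"), ord("e"), 0xE001]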
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _snake_case = { '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''], '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['''MaskFormerFeatureExtractor'''] _snake_case = ['''MaskFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MaskFormerForInstanceSegmentation''', '''MaskFormerModel''', '''MaskFormerPreTrainedModel''', ] _snake_case = [ '''MaskFormerSwinBackbone''', '''MaskFormerSwinModel''', '''MaskFormerSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
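# What the lazy structure above buys, sketched briefly: importing the package
# stays cheap, and the heavy torch/vision modules are loaded only when one of
# the registered attributes is first accessed (the names below exist in
# transformers when torch is installed):
from transformers.models.maskformer import MaskFormerConfig  # resolved lazily by _LazyModule

maskformer_config = MaskFormerConfig()  # first access triggers the real import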
import logging import os from .state import PartialState class _snake_case ( logging.LoggerAdapter ): @staticmethod def _lowerCamelCase ( __lowerCamelCase: Any ) -> int: __UpperCAmelCase : str = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: List[Any] ) -> Optional[int]: if PartialState._shared_state == {}: raise RuntimeError( "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." ) __UpperCAmelCase : Any = kwargs.pop("main_process_only" , __lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = kwargs.pop("in_order" , __lowerCamelCase ) if self.isEnabledFor(__lowerCamelCase ): if self._should_log(__lowerCamelCase ): __UpperCAmelCase , __UpperCAmelCase : int = self.process(__lowerCamelCase , __lowerCamelCase ) self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ) elif in_order: __UpperCAmelCase : Optional[int] = PartialState() for i in range(state.num_processes ): if i == state.process_index: __UpperCAmelCase , __UpperCAmelCase : List[Any] = self.process(__lowerCamelCase , __lowerCamelCase ) self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ) state.wait_for_everyone() def _UpperCamelCase ( snake_case__, snake_case__ = None ) -> List[str]: if log_level is None: __UpperCAmelCase : List[Any] = os.environ.get("ACCELERATE_LOG_LEVEL", snake_case__ ) __UpperCAmelCase : Union[str, Any] = logging.getLogger(snake_case__ ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(snake_case__, {} )
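# Hedged usage sketch, assuming the factory function above is accelerate's
# get_logger: PartialState must be initialized first, e.g. by constructing an
# Accelerator, and the adapter then gates each record per process.
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # populates PartialState._shared_state
multi_logger = get_logger(__name__, log_level="INFO")
multi_logger.info("emitted once, on the main process only")
multi_logger.info("emitted by every rank, in rank order", main_process_only=False, in_order=True)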
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class _snake_case ( _lowercase ): def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str: super().__init__( __lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , ) __UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths} __UpperCAmelCase : int = Text( cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , ) def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]: # Build iterable dataset if self.streaming: __UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __UpperCAmelCase : Any = None __UpperCAmelCase : Any = None __UpperCAmelCase : Dict = None __UpperCAmelCase : str = None self.builder.download_and_prepare( download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , ) __UpperCAmelCase : Dict = self.builder.as_dataset( split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory ) return dataset
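# Hedged sketch: assuming the class above is datasets' TextDatasetReader and
# the obfuscated method is its read(), it is the machinery behind
# load_dataset("text", ...); the data file name below is hypothetical, and the
# builder yields one example per line of the file:
from datasets import load_dataset

text_dataset = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")
print(text_dataset[0]["text"])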
from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class _snake_case : def __init__( self: Optional[int] , __lowerCamelCase: Collection[float] | None = None ) -> None: if components is None: __UpperCAmelCase : Tuple = [] __UpperCAmelCase : Dict = list(__lowerCamelCase ) def __len__( self: Optional[int] ) -> int: return len(self.__components ) def __str__( self: Optional[Any] ) -> str: return "(" + ",".join(map(__lowerCamelCase , self.__components ) ) + ")" def __add__( self: str , __lowerCamelCase: Vector ) -> Vector: __UpperCAmelCase : List[Any] = len(self ) if size == len(__lowerCamelCase ): __UpperCAmelCase : Tuple = [self.__components[i] + other.component(__lowerCamelCase ) for i in range(__lowerCamelCase )] return Vector(__lowerCamelCase ) else: raise Exception("must have the same size" ) def __sub__( self: List[str] , __lowerCamelCase: Vector ) -> Vector: __UpperCAmelCase : Union[str, Any] = len(self ) if size == len(__lowerCamelCase ): __UpperCAmelCase : int = [self.__components[i] - other.component(__lowerCamelCase ) for i in range(__lowerCamelCase )] return Vector(__lowerCamelCase ) else: # error case raise Exception("must have the same size" ) @overload def __mul__( self: Any , __lowerCamelCase: float ) -> Vector: ... @overload def __mul__( self: Dict , __lowerCamelCase: Vector ) -> float: ... def __mul__( self: Tuple , __lowerCamelCase: float | Vector ) -> float | Vector: if isinstance(__lowerCamelCase , (float, int) ): __UpperCAmelCase : int = [c * other for c in self.__components] return Vector(__lowerCamelCase ) elif isinstance(__lowerCamelCase , __lowerCamelCase ) and len(self ) == len(__lowerCamelCase ): __UpperCAmelCase : Union[str, Any] = len(self ) __UpperCAmelCase : List[str] = [self.__components[i] * other.component(__lowerCamelCase ) for i in range(__lowerCamelCase )] return sum(__lowerCamelCase ) else: # error case raise Exception("invalid operand!" 
) def _lowerCamelCase ( self: Optional[int] ) -> Vector: return Vector(self.__components ) def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: int ) -> float: if isinstance(__lowerCamelCase , __lowerCamelCase ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception("index out of range" ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: float ) -> None: assert -len(self.__components ) <= pos < len(self.__components ) __UpperCAmelCase : List[str] = value def _lowerCamelCase ( self: int ) -> float: if len(self.__components ) == 0: raise Exception("Vector is empty" ) __UpperCAmelCase : int = [c**2 for c in self.__components] return math.sqrt(sum(__lowerCamelCase ) ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: Vector , __lowerCamelCase: bool = False ) -> float: __UpperCAmelCase : Optional[int] = self * other __UpperCAmelCase : Optional[int] = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def _UpperCamelCase ( snake_case__ ) -> Vector: assert isinstance(snake_case__, snake_case__ ) return Vector([0] * dimension ) def _UpperCamelCase ( snake_case__, snake_case__ ) -> Vector: assert isinstance(snake_case__, snake_case__ ) and (isinstance(snake_case__, snake_case__ )) __UpperCAmelCase : Union[str, Any] = [0] * dimension __UpperCAmelCase : Any = 1 return Vector(snake_case__ ) def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Vector: assert ( isinstance(snake_case__, snake_case__ ) and isinstance(snake_case__, snake_case__ ) and (isinstance(snake_case__, (int, float) )) ) return x * scalar + y def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Vector: random.seed(snake_case__ ) __UpperCAmelCase : int = [random.randint(snake_case__, snake_case__ ) for _ in range(snake_case__ )] return Vector(snake_case__ ) class _snake_case : def __init__( self: int , __lowerCamelCase: list[list[float]] , __lowerCamelCase: int , __lowerCamelCase: int ) -> None: __UpperCAmelCase : List[str] = matrix __UpperCAmelCase : Any = w __UpperCAmelCase : Any = h def __str__( self: List[str] ) -> str: __UpperCAmelCase : Optional[int] = "" for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self: str , __lowerCamelCase: Matrix ) -> Matrix: if self.__width == other.width() and self.__height == other.height(): __UpperCAmelCase : Tuple = [] for i in range(self.__height ): __UpperCAmelCase : List[Any] = [ self.__matrix[i][j] + other.component(__lowerCamelCase , __lowerCamelCase ) for j in range(self.__width ) ] matrix.append(__lowerCamelCase ) return Matrix(__lowerCamelCase , self.__width , self.__height ) else: raise Exception("matrix must have the same dimension!" ) def __sub__( self: Optional[int] , __lowerCamelCase: Matrix ) -> Matrix: if self.__width == other.width() and self.__height == other.height(): __UpperCAmelCase : Optional[Any] = [] for i in range(self.__height ): __UpperCAmelCase : Any = [ self.__matrix[i][j] - other.component(__lowerCamelCase , __lowerCamelCase ) for j in range(self.__width ) ] matrix.append(__lowerCamelCase ) return Matrix(__lowerCamelCase , self.__width , self.__height ) else: raise Exception("matrices must have the same dimension!" ) @overload def __mul__( self: Dict , __lowerCamelCase: float ) -> Matrix: ... 
@overload def __mul__( self: Optional[Any] , __lowerCamelCase: Vector ) -> Vector: ... def __mul__( self: Optional[Any] , __lowerCamelCase: float | Vector ) -> Vector | Matrix: if isinstance(__lowerCamelCase , __lowerCamelCase ): # matrix-vector if len(__lowerCamelCase ) == self.__width: __UpperCAmelCase : List[Any] = zero_vector(self.__height ) for i in range(self.__height ): __UpperCAmelCase : Optional[Any] = [ self.__matrix[i][j] * other.component(__lowerCamelCase ) for j in range(self.__width ) ] ans.change_component(__lowerCamelCase , sum(__lowerCamelCase ) ) return ans else: raise Exception( "vector must have the same size as the " "number of columns of the matrix!" ) elif isinstance(__lowerCamelCase , (int, float) ): # matrix-scalar __UpperCAmelCase : List[Any] = [ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(__lowerCamelCase , self.__width , self.__height ) return None def _lowerCamelCase ( self: List[str] ) -> int: return self.__height def _lowerCamelCase ( self: Dict ) -> int: return self.__width def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: int , __lowerCamelCase: int ) -> float: if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception("change_component: indices out of bounds" ) def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: float ) -> None: if 0 <= x < self.__height and 0 <= y < self.__width: __UpperCAmelCase : str = value else: raise Exception("change_component: indices out of bounds" ) def _lowerCamelCase ( self: Any , __lowerCamelCase: int , __lowerCamelCase: int ) -> float: if self.__height != self.__width: raise Exception("Matrix is not square" ) __UpperCAmelCase : List[str] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(__lowerCamelCase ) ): __UpperCAmelCase : str = minor[i][:y] + minor[i][y + 1 :] return Matrix(__lowerCamelCase , self.__width - 1 , self.__height - 1 ).determinant() def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: int , __lowerCamelCase: int ) -> float: if self.__height != self.__width: raise Exception("Matrix is not square" ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(__lowerCamelCase , __lowerCamelCase ) else: raise Exception("Indices out of bounds" ) def _lowerCamelCase ( self: Optional[int] ) -> float: if self.__height != self.__width: raise Exception("Matrix is not square" ) if self.__height < 1: raise Exception("Matrix has no element" ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __UpperCAmelCase : Any = [ self.__matrix[0][y] * self.cofactor(0 , __lowerCamelCase ) for y in range(self.__width ) ] return sum(__lowerCamelCase ) def _UpperCamelCase ( snake_case__ ) -> Matrix: __UpperCAmelCase : list[list[float]] = [[0] * n for _ in range(snake_case__ )] return Matrix(snake_case__, snake_case__, snake_case__ ) def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__ ) -> Matrix: random.seed(snake_case__ ) __UpperCAmelCase : list[list[float]] = [ [random.randint(snake_case__, snake_case__ ) for _ in range(snake_case__ )] for _ in range(snake_case__ ) ] return Matrix(snake_case__, snake_case__, snake_case__ )
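# Worked example for the two classes above. Their type annotations name them
# Vector and Matrix, and the obfuscated method names are assumed to match the
# originals (euclidean_length, determinant):
side = Vector([3.0, 4.0])
assert side.euclidean_length() == 5.0                  # sqrt(3^2 + 4^2)
assert side * Vector([1.0, 0.0]) == 3.0                # dot product via __mul__

square = Matrix([[1.0, 2.0], [3.0, 4.0]], 2, 2)
assert square.determinant() == 1.0 * 4.0 - 2.0 * 3.0   # 2x2 base case: -2.0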
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _snake_case = { '''configuration_trajectory_transformer''': [ '''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrajectoryTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrajectoryTransformerModel''', '''TrajectoryTransformerPreTrainedModel''', '''load_tf_weights_in_trajectory_transformer''', ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool _snake_case = { '''Acehnese Arabic''': '''ace_Arab''', '''Acehnese Latin''': '''ace_Latn''', '''Mesopotamian Arabic''': '''acm_Arab''', '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''', '''Tunisian Arabic''': '''aeb_Arab''', '''Afrikaans''': '''afr_Latn''', '''South Levantine Arabic''': '''ajp_Arab''', '''Akan''': '''aka_Latn''', '''Amharic''': '''amh_Ethi''', '''North Levantine Arabic''': '''apc_Arab''', '''Modern Standard Arabic''': '''arb_Arab''', '''Modern Standard Arabic Romanized''': '''arb_Latn''', '''Najdi Arabic''': '''ars_Arab''', '''Moroccan Arabic''': '''ary_Arab''', '''Egyptian Arabic''': '''arz_Arab''', '''Assamese''': '''asm_Beng''', '''Asturian''': '''ast_Latn''', '''Awadhi''': '''awa_Deva''', '''Central Aymara''': '''ayr_Latn''', '''South Azerbaijani''': '''azb_Arab''', '''North Azerbaijani''': '''azj_Latn''', '''Bashkir''': '''bak_Cyrl''', '''Bambara''': '''bam_Latn''', '''Balinese''': '''ban_Latn''', '''Belarusian''': '''bel_Cyrl''', '''Bemba''': '''bem_Latn''', '''Bengali''': '''ben_Beng''', '''Bhojpuri''': '''bho_Deva''', '''Banjar Arabic''': '''bjn_Arab''', '''Banjar Latin''': '''bjn_Latn''', '''Standard Tibetan''': '''bod_Tibt''', '''Bosnian''': '''bos_Latn''', '''Buginese''': '''bug_Latn''', '''Bulgarian''': '''bul_Cyrl''', '''Catalan''': '''cat_Latn''', '''Cebuano''': '''ceb_Latn''', '''Czech''': '''ces_Latn''', '''Chokwe''': '''cjk_Latn''', '''Central Kurdish''': '''ckb_Arab''', '''Crimean Tatar''': '''crh_Latn''', '''Welsh''': '''cym_Latn''', '''Danish''': '''dan_Latn''', '''German''': '''deu_Latn''', '''Southwestern Dinka''': '''dik_Latn''', '''Dyula''': '''dyu_Latn''', '''Dzongkha''': '''dzo_Tibt''', '''Greek''': '''ell_Grek''', '''English''': '''eng_Latn''', '''Esperanto''': '''epo_Latn''', '''Estonian''': '''est_Latn''', '''Basque''': '''eus_Latn''', '''Ewe''': '''ewe_Latn''', '''Faroese''': '''fao_Latn''', '''Fijian''': '''fij_Latn''', '''Finnish''': '''fin_Latn''', '''Fon''': '''fon_Latn''', '''French''': '''fra_Latn''', '''Friulian''': '''fur_Latn''', '''Nigerian Fulfulde''': '''fuv_Latn''', '''Scottish Gaelic''': '''gla_Latn''', '''Irish''': '''gle_Latn''', '''Galician''': '''glg_Latn''', '''Guarani''': '''grn_Latn''', '''Gujarati''': '''guj_Gujr''', '''Haitian Creole''': '''hat_Latn''', '''Hausa''': '''hau_Latn''', '''Hebrew''': '''heb_Hebr''', '''Hindi''': '''hin_Deva''', '''Chhattisgarhi''': '''hne_Deva''', '''Croatian''': '''hrv_Latn''', '''Hungarian''': '''hun_Latn''', '''Armenian''': '''hye_Armn''', '''Igbo''': '''ibo_Latn''', '''Ilocano''': '''ilo_Latn''', '''Indonesian''': '''ind_Latn''', '''Icelandic''': '''isl_Latn''', '''Italian''': '''ita_Latn''', '''Javanese''': '''jav_Latn''', '''Japanese''': '''jpn_Jpan''', '''Kabyle''': '''kab_Latn''', '''Jingpho''': '''kac_Latn''', '''Kamba''': '''kam_Latn''', '''Kannada''': '''kan_Knda''', '''Kashmiri Arabic''': 
'''kas_Arab''', '''Kashmiri Devanagari''': '''kas_Deva''', '''Georgian''': '''kat_Geor''', '''Central Kanuri Arabic''': '''knc_Arab''', '''Central Kanuri Latin''': '''knc_Latn''', '''Kazakh''': '''kaz_Cyrl''', '''Kabiyè''': '''kbp_Latn''', '''Kabuverdianu''': '''kea_Latn''', '''Khmer''': '''khm_Khmr''', '''Kikuyu''': '''kik_Latn''', '''Kinyarwanda''': '''kin_Latn''', '''Kyrgyz''': '''kir_Cyrl''', '''Kimbundu''': '''kmb_Latn''', '''Northern Kurdish''': '''kmr_Latn''', '''Kikongo''': '''kon_Latn''', '''Korean''': '''kor_Hang''', '''Lao''': '''lao_Laoo''', '''Ligurian''': '''lij_Latn''', '''Limburgish''': '''lim_Latn''', '''Lingala''': '''lin_Latn''', '''Lithuanian''': '''lit_Latn''', '''Lombard''': '''lmo_Latn''', '''Latgalian''': '''ltg_Latn''', '''Luxembourgish''': '''ltz_Latn''', '''Luba-Kasai''': '''lua_Latn''', '''Ganda''': '''lug_Latn''', '''Luo''': '''luo_Latn''', '''Mizo''': '''lus_Latn''', '''Standard Latvian''': '''lvs_Latn''', '''Magahi''': '''mag_Deva''', '''Maithili''': '''mai_Deva''', '''Malayalam''': '''mal_Mlym''', '''Marathi''': '''mar_Deva''', '''Minangkabau Arabic ''': '''min_Arab''', '''Minangkabau Latin''': '''min_Latn''', '''Macedonian''': '''mkd_Cyrl''', '''Plateau Malagasy''': '''plt_Latn''', '''Maltese''': '''mlt_Latn''', '''Meitei Bengali''': '''mni_Beng''', '''Halh Mongolian''': '''khk_Cyrl''', '''Mossi''': '''mos_Latn''', '''Maori''': '''mri_Latn''', '''Burmese''': '''mya_Mymr''', '''Dutch''': '''nld_Latn''', '''Norwegian Nynorsk''': '''nno_Latn''', '''Norwegian Bokmål''': '''nob_Latn''', '''Nepali''': '''npi_Deva''', '''Northern Sotho''': '''nso_Latn''', '''Nuer''': '''nus_Latn''', '''Nyanja''': '''nya_Latn''', '''Occitan''': '''oci_Latn''', '''West Central Oromo''': '''gaz_Latn''', '''Odia''': '''ory_Orya''', '''Pangasinan''': '''pag_Latn''', '''Eastern Panjabi''': '''pan_Guru''', '''Papiamento''': '''pap_Latn''', '''Western Persian''': '''pes_Arab''', '''Polish''': '''pol_Latn''', '''Portuguese''': '''por_Latn''', '''Dari''': '''prs_Arab''', '''Southern Pashto''': '''pbt_Arab''', '''Ayacucho Quechua''': '''quy_Latn''', '''Romanian''': '''ron_Latn''', '''Rundi''': '''run_Latn''', '''Russian''': '''rus_Cyrl''', '''Sango''': '''sag_Latn''', '''Sanskrit''': '''san_Deva''', '''Santali''': '''sat_Olck''', '''Sicilian''': '''scn_Latn''', '''Shan''': '''shn_Mymr''', '''Sinhala''': '''sin_Sinh''', '''Slovak''': '''slk_Latn''', '''Slovenian''': '''slv_Latn''', '''Samoan''': '''smo_Latn''', '''Shona''': '''sna_Latn''', '''Sindhi''': '''snd_Arab''', '''Somali''': '''som_Latn''', '''Southern Sotho''': '''sot_Latn''', '''Spanish''': '''spa_Latn''', '''Tosk Albanian''': '''als_Latn''', '''Sardinian''': '''srd_Latn''', '''Serbian''': '''srp_Cyrl''', '''Swati''': '''ssw_Latn''', '''Sundanese''': '''sun_Latn''', '''Swedish''': '''swe_Latn''', '''Swahili''': '''swh_Latn''', '''Silesian''': '''szl_Latn''', '''Tamil''': '''tam_Taml''', '''Tatar''': '''tat_Cyrl''', '''Telugu''': '''tel_Telu''', '''Tajik''': '''tgk_Cyrl''', '''Tagalog''': '''tgl_Latn''', '''Thai''': '''tha_Thai''', '''Tigrinya''': '''tir_Ethi''', '''Tamasheq Latin''': '''taq_Latn''', '''Tamasheq Tifinagh''': '''taq_Tfng''', '''Tok Pisin''': '''tpi_Latn''', '''Tswana''': '''tsn_Latn''', '''Tsonga''': '''tso_Latn''', '''Turkmen''': '''tuk_Latn''', '''Tumbuka''': '''tum_Latn''', '''Turkish''': '''tur_Latn''', '''Twi''': '''twi_Latn''', '''Central Atlas Tamazight''': '''tzm_Tfng''', '''Uyghur''': '''uig_Arab''', '''Ukrainian''': '''ukr_Cyrl''', '''Umbundu''': '''umb_Latn''', '''Urdu''': '''urd_Arab''', '''Northern 
Uzbek''': '''uzn_Latn''', '''Venetian''': '''vec_Latn''', '''Vietnamese''': '''vie_Latn''', '''Waray''': '''war_Latn''', '''Wolof''': '''wol_Latn''', '''Xhosa''': '''xho_Latn''', '''Eastern Yiddish''': '''ydd_Hebr''', '''Yoruba''': '''yor_Latn''', '''Yue Chinese''': '''yue_Hant''', '''Chinese Simplified''': '''zho_Hans''', '''Chinese Traditional''': '''zho_Hant''', '''Standard Malay''': '''zsm_Latn''', '''Zulu''': '''zul_Latn''', } class _snake_case ( _lowercase ): lowerCamelCase__: Tuple = "facebook/nllb-200-distilled-600M" lowerCamelCase__: List[Any] = ( "This is a tool that translates text from one language to another. It takes three inputs: `text`, which should " "be the text to translate, `src_lang`, which should be the language of the text to translate, and `tgt_lang`, " "which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in " "plain English, such as 'Romanian' or 'Albanian'. It returns the text translated into `tgt_lang`." ) lowerCamelCase__: Any = "translator" lowerCamelCase__: List[Any] = AutoTokenizer lowerCamelCase__: Dict = AutoModelForSeqaSeqLM lowerCamelCase__: Optional[Any] = LANGUAGE_CODES lowerCamelCase__: List[str] = ["text", "text", "text"] lowerCamelCase__: Dict = ["text"] def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[str] , __lowerCamelCase: int , __lowerCamelCase: List[str] ) -> str: if src_lang not in self.lang_to_code: raise ValueError(f'''{src_lang} is not a supported language.''' ) if tgt_lang not in self.lang_to_code: raise ValueError(f'''{tgt_lang} is not a supported language.''' ) __UpperCAmelCase : Union[str, Any] = self.lang_to_code[src_lang] __UpperCAmelCase : Dict = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( __lowerCamelCase , return_tensors="pt" , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: str ) -> str: return self.model.generate(**__lowerCamelCase ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: str ) -> Optional[int]: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__lowerCamelCase )
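# Hedged usage sketch. Upstream transformers ships this class as
# TranslationTool (the import path below is an assumption); the PipelineTool
# base class chains encode -> forward -> decode behind __call__, so the tool
# is simply called with plain-English language names from LANGUAGE_CODES:
from transformers.tools import TranslationTool

translator = TranslationTool()
print(translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))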
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]: __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : Optional[int] = image_size __UpperCAmelCase : List[str] = num_channels __UpperCAmelCase : Union[str, Any] = num_stages __UpperCAmelCase : List[str] = hidden_sizes __UpperCAmelCase : Any = depths __UpperCAmelCase : Optional[int] = is_training __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[int] = intermediate_size __UpperCAmelCase : Optional[Any] = hidden_act __UpperCAmelCase : Union[str, Any] = num_labels __UpperCAmelCase : Any = initializer_range __UpperCAmelCase : List[str] = out_features __UpperCAmelCase : Tuple = out_indices __UpperCAmelCase : List[Any] = scope def _lowerCamelCase ( self: List[Any] ) -> Optional[int]: __UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : List[str] = None if self.use_labels: __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase : Optional[Any] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self: Tuple ) -> List[Any]: return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]: __UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : List[str] = model(__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], 
self.image_size // 32, self.image_size // 32) , ) def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple: __UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Any = model(__lowerCamelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _lowerCamelCase ( self: int ) -> List[str]: __UpperCAmelCase : int = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs __UpperCAmelCase : str = {"pixel_values": pixel_values} return config, inputs_dict def _lowerCamelCase ( self: List[Any] ) -> List[Any]: __UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class _snake_case ( _lowercase , _lowercase , unittest.TestCase ): lowerCamelCase__: Dict = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowerCamelCase__: str = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) lowerCamelCase__: Tuple = False lowerCamelCase__: int = False lowerCamelCase__: Dict = False lowerCamelCase__: int = False lowerCamelCase__: Any = False def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]: __UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self ) __UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def _lowerCamelCase ( self: Dict ) -> Tuple: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() 
self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self: List[Any] ) -> int: return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def _lowerCamelCase ( self: Any ) -> Any: pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def _lowerCamelCase ( self: str ) -> Optional[Any]: pass def _lowerCamelCase ( self: List[Any] ) -> int: if not self.model_tester.is_training: return for model_class in self.all_model_classes: __UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels() __UpperCAmelCase : Optional[Any] = True if model_class.__name__ in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ]: continue __UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() __UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) __UpperCAmelCase : Any = model(**__lowerCamelCase ).loss loss.backward() def _lowerCamelCase ( self: Optional[int] ) -> Dict: if not self.model_tester.is_training: return for model_class in self.all_model_classes: __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels() __UpperCAmelCase : List[str] = False __UpperCAmelCase : int = True if ( model_class.__name__ in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )] or not model_class.supports_gradient_checkpointing ): continue __UpperCAmelCase : int = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.gradient_checkpointing_enable() model.train() __UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) __UpperCAmelCase : Any = model(**__lowerCamelCase ).loss loss.backward() def _lowerCamelCase ( self: List[str] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : str = model_class(__lowerCamelCase ) __UpperCAmelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : List[Any] = [*signature.parameters.keys()] __UpperCAmelCase : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _lowerCamelCase ( self: str ) -> List[Any]: __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _lowerCamelCase ( self: Union[str, Any] ) -> Dict: def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ): __UpperCAmelCase : Any = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) __UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : Optional[int] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, 
self.model_tester.image_size // 4] , ) __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Optional[int] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Any = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def _lowerCamelCase ( self: Dict ) -> List[Any]: for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def _UpperCamelCase ( ) -> List[Any]: __UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self: Optional[int] ) -> Dict: return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def _lowerCamelCase ( self: List[Any] ) -> Tuple: __UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase ) __UpperCAmelCase : List[str] = self.default_image_processor __UpperCAmelCase : Optional[Any] = prepare_img() __UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): __UpperCAmelCase : str = model(**__lowerCamelCase ) # verify the logits __UpperCAmelCase : Dict = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) __UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
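# A minimal inference sketch of what the slow integration test above checks,
# assuming the scrambled ConvNextVa* names correspond to the upstream
# ConvNextV2 classes; the checkpoint name and image path are taken from the
# test itself.
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000) for the ImageNet-1k checkpoint
print(model.config.id2label[int(logits.argmax(-1))])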
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def _UpperCamelCase(snake_case__) -> None:
    # Freeze every parameter of the given module.
    for param in snake_case__.parameters():
        param.requires_grad = False


def _UpperCamelCase() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def _UpperCamelCase(snake_case__) -> None:
    # Display an image with both axes hidden.
    fig = plt.imshow(snake_case__)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def _UpperCamelCase() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
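# Hedged usage sketch tying the helpers above together. The three helpers all
# share the scrambled name `_UpperCamelCase`, so the pattern is shown inline
# rather than by calling them; names below are illustrative.
import torch
import torch.nn as nn
from datetime import datetime

model = nn.Linear(4, 2)
for p in model.parameters():  # freeze, as in the first helper
    p.requires_grad = False
device = "cuda" if torch.cuda.is_available() else "cpu"  # as in the device helper
model.to(device)
print(f"[{datetime.now().strftime('%H:%M:%S')}] model frozen on {device}")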
import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _snake_case = logging.get_logger(__name__) _snake_case = { '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''', # See all DETR models at https://huggingface.co/models?filter=detr } class _snake_case ( _lowercase ): lowerCamelCase__: str = "detr" lowerCamelCase__: Dict = ["past_key_values"] lowerCamelCase__: str = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) __UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): __UpperCAmelCase : List[Any] = backbone_config.get("model_type" ) __UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type] __UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase ) # set timm attributes to None __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None __UpperCAmelCase : Any = use_timm_backbone __UpperCAmelCase : Optional[Any] = backbone_config __UpperCAmelCase : Optional[Any] = num_channels __UpperCAmelCase : List[Any] = num_queries __UpperCAmelCase : Optional[int] = d_model __UpperCAmelCase : Optional[Any] = encoder_ffn_dim __UpperCAmelCase : Dict = encoder_layers __UpperCAmelCase : List[Any] = encoder_attention_heads __UpperCAmelCase : int = decoder_ffn_dim __UpperCAmelCase : Tuple = decoder_layers __UpperCAmelCase : int = decoder_attention_heads __UpperCAmelCase : List[Any] = dropout __UpperCAmelCase : Dict = attention_dropout __UpperCAmelCase : Optional[Any] = activation_dropout __UpperCAmelCase : int = activation_function __UpperCAmelCase : Any = init_std __UpperCAmelCase : str = init_xavier_std __UpperCAmelCase : int = encoder_layerdrop __UpperCAmelCase : Tuple = decoder_layerdrop __UpperCAmelCase : List[Any] = encoder_layers __UpperCAmelCase : Optional[Any] = auxiliary_loss __UpperCAmelCase : int = position_embedding_type __UpperCAmelCase : Optional[int] = backbone __UpperCAmelCase : str = use_pretrained_backbone __UpperCAmelCase : Dict = dilation # Hungarian matcher __UpperCAmelCase : Optional[int] = class_cost __UpperCAmelCase : Optional[Any] = bbox_cost __UpperCAmelCase : Optional[int] = giou_cost # Loss coefficients __UpperCAmelCase : Any = mask_loss_coefficient __UpperCAmelCase : Any = dice_loss_coefficient __UpperCAmelCase : Any = bbox_loss_coefficient __UpperCAmelCase : Optional[int] = giou_loss_coefficient __UpperCAmelCase : Optional[Any] = eos_coefficient super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase ) @property def _lowerCamelCase ( self: Dict ) -> int: return self.encoder_attention_heads @property def _lowerCamelCase ( self: str ) -> int: return self.d_model @classmethod def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]: return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self: str ) -> Dict[str, any]: __UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: __UpperCAmelCase : int = self.backbone_config.to_dict() __UpperCAmelCase : List[str] = self.__class__.model_type return output class _snake_case ( _lowercase ): lowerCamelCase__: Optional[int] = version.parse("1.11" ) @property def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _lowerCamelCase ( self: Optional[Any] ) -> float: return 1e-5 @property def _lowerCamelCase ( self: List[str] ) -> int: return 12
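# Hedged usage sketch, assuming the class above corresponds to the upstream
# DetrConfig (its model_type is "detr"); DetrForObjectDetection is the matching
# upstream model class.
from transformers import DetrConfig, DetrForObjectDetection

config = DetrConfig(num_queries=50, encoder_layers=2, decoder_layers=2)
model = DetrForObjectDetection(config)  # randomly initialized; the default ResNet-50 backbone needs `timm`
# The attribute_map shown above aliases generic names onto DETR-specific ones:
assert config.hidden_size == config.d_model
assert config.num_attention_heads == config.encoder_attention_heads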
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _snake_case = logging.get_logger(__name__) _snake_case = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''} _snake_case = { '''vocab_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''', }, '''emoji_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''', }, } _snake_case = { '''abeja/gpt-neox-japanese-2.7b''': 2048, } def _UpperCamelCase ( snake_case__, snake_case__ ) -> Optional[int]: with open(snake_case__, "r", encoding="utf-8" ) as f: __UpperCAmelCase : Tuple = json.loads(f.read() ) __UpperCAmelCase : Any = collections.OrderedDict() __UpperCAmelCase : List[str] = collections.OrderedDict() __UpperCAmelCase : Tuple = collections.OrderedDict() with open(snake_case__, "r", encoding="utf-8" ) as f: __UpperCAmelCase : List[Any] = f.readlines() __UpperCAmelCase : str = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token] for idx, b in enumerate(snake_case__ ): __UpperCAmelCase : str = b __UpperCAmelCase : int = idx for wd in b: __UpperCAmelCase : Any = idx return vocab, raw_vocab, ids_to_tokens, emoji class _snake_case ( _lowercase ): lowerCamelCase__: List[Any] = VOCAB_FILES_NAMES lowerCamelCase__: List[str] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__: Dict = ["input_ids", "attention_mask"] def __init__( self: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: int="<|endoftext|>" , __lowerCamelCase: Dict="<|endoftext|>" , __lowerCamelCase: Any="<|startoftext|>" , __lowerCamelCase: Tuple="<|endoftext|>" , __lowerCamelCase: Tuple=False , **__lowerCamelCase: int , ) -> int: super().__init__( unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , do_clean_text=__lowerCamelCase , **__lowerCamelCase , ) if not os.path.isfile(__lowerCamelCase ): raise ValueError( f'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained''' " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) if not os.path.isfile(__lowerCamelCase ): raise ValueError( f'''Can\'t find a emoji file at path \'{emoji_file}\'. 
To load the emoji information from a Google''' " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) __UpperCAmelCase : Tuple = do_clean_text __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = load_vocab_and_emoji(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def _lowerCamelCase ( self: Dict ) -> str: # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab return len(self.raw_vocab ) def _lowerCamelCase ( self: Any ) -> str: return dict(self.raw_vocab , **self.added_tokens_encoder ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: Optional[Any] ) -> Any: return self.subword_tokenizer.tokenize(__lowerCamelCase , clean=self.do_clean_text ) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Dict ) -> Dict: return self.vocab.get(__lowerCamelCase , self.vocab.get(self.unk_token ) ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: Any ) -> List[str]: return self.subword_tokenizer.convert_id_to_token(__lowerCamelCase ) def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: List[str] ) -> Optional[int]: __UpperCAmelCase : Optional[int] = "".join(__lowerCamelCase ).strip() return out_string def _lowerCamelCase ( self: str , __lowerCamelCase: "Conversation" ) -> List[int]: __UpperCAmelCase : Union[str, Any] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) + [self.eos_token_id] ) if len(__lowerCamelCase ) > self.model_max_length: __UpperCAmelCase : int = input_ids[-self.model_max_length :] return input_ids def _lowerCamelCase ( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]: __UpperCAmelCase : Union[str, Any] = 0 if os.path.isdir(__lowerCamelCase ): __UpperCAmelCase : List[str] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __UpperCAmelCase : Optional[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] ) else: __UpperCAmelCase : Tuple = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"] ) __UpperCAmelCase : Any = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" 
) __UpperCAmelCase : Optional[int] = token_index writer.write(",".join(__lowerCamelCase ) + "\n" ) index += 1 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: json.dump(self.emoji , __lowerCamelCase ) return vocab_file, emoji_file class _snake_case ( _lowercase ): def __init__( self: List[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Tuple , __lowerCamelCase: Dict ) -> Optional[int]: __UpperCAmelCase : Union[str, Any] = vocab # same as swe __UpperCAmelCase : Dict = ids_to_tokens # same as bpe __UpperCAmelCase : List[str] = emoji __UpperCAmelCase : Optional[int] = np.max([len(__lowerCamelCase ) for w in self.vocab.keys()] ) __UpperCAmelCase : Tuple = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" ) __UpperCAmelCase : List[Any] = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" ) __UpperCAmelCase : List[Any] = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" ) __UpperCAmelCase : int = re.compile( R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) __UpperCAmelCase : Union[str, Any] = re.compile( R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) __UpperCAmelCase : List[str] = re.compile( R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" ) __UpperCAmelCase : List[str] = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" __UpperCAmelCase : List[str] = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" __UpperCAmelCase : Optional[int] = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} ) def __len__( self: Dict ) -> int: return len(self.ids_to_tokens ) def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Any ) -> Any: __UpperCAmelCase : str = self.content_repattera.sub("<URL>" , __lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = self.content_repattera.sub("<EMAIL>" , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = self.content_repattera.sub("<TEL>" , __lowerCamelCase ) __UpperCAmelCase : Any = self.content_repattera.sub("<DATE>" , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = self.content_repattera.sub("<DATE>" , __lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.content_repattera.sub("<PRICE>" , __lowerCamelCase ) __UpperCAmelCase : str = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: __UpperCAmelCase : Union[str, Any] = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" ) return content def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: str=False ) -> Dict: __UpperCAmelCase : List[Any] = text.replace(" " , "<SP>" ) __UpperCAmelCase : str = text.replace(" " , "<SP>" ) __UpperCAmelCase : Tuple = text.replace("\r\n" , "<BR>" ) __UpperCAmelCase : Optional[Any] = text.replace("\n" , "<BR>" ) __UpperCAmelCase : List[str] = text.replace("\r" , "<BR>" ) __UpperCAmelCase : List[str] = text.replace("\t" , "<TAB>" ) __UpperCAmelCase : List[Any] = text.replace("—" , "ー" ) __UpperCAmelCase : Dict = text.replace("−" , "ー" ) for k, v in self.emoji["emoji"].items(): if k in text: __UpperCAmelCase : Optional[int] = text.replace(__lowerCamelCase , 
__lowerCamelCase ) if clean: __UpperCAmelCase : Any = self.clean_text(__lowerCamelCase ) def check_simbol(__lowerCamelCase: Union[str, Any] ): __UpperCAmelCase : Any = x.encode() if len(__lowerCamelCase ) == 1 and len(__lowerCamelCase ) == 2: __UpperCAmelCase : int = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0xC_2_A_1 and c <= 0xC_2_B_F) or (c >= 0xC_7_8_0 and c <= 0xC_7_8_3) or (c >= 0xC_A_B_9 and c <= 0xC_B_B_F) or (c >= 0xC_C_8_0 and c <= 0xC_D_A_2) ): return True return False def checkuae(__lowerCamelCase: Any ): __UpperCAmelCase : List[Any] = x.encode() if len(__lowerCamelCase ) == 1 and len(__lowerCamelCase ) == 3: __UpperCAmelCase : List[Any] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0xE_2_8_0_8_0 and c <= 0xE_2_B_0_7_F: return True return False __UpperCAmelCase : int = 0 __UpperCAmelCase : Tuple = [] while pos < len(__lowerCamelCase ): __UpperCAmelCase : Union[str, Any] = min(len(__lowerCamelCase ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3 __UpperCAmelCase : str = [] # (token_id, token, pos) for e in range(__lowerCamelCase , __lowerCamelCase , -1 ): __UpperCAmelCase : List[Any] = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(__lowerCamelCase ) > 2: __UpperCAmelCase : int = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(__lowerCamelCase ) > 0: # the smallest token_id is adopted __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = sorted(__lowerCamelCase , key=lambda __lowerCamelCase : x[0] )[0] result.append(__lowerCamelCase ) __UpperCAmelCase : str = e else: __UpperCAmelCase : List[str] = pos + 1 __UpperCAmelCase : List[str] = text[pos:end] if check_simbol(__lowerCamelCase ): result.append("<KIGOU>" ) elif checkuae(__lowerCamelCase ): result.append("<U2000U2BFF>" ) else: for i in wd.encode("utf-8" ): result.append("<|byte%d|>" % i ) __UpperCAmelCase : Union[str, Any] = end return result def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: List[str] , __lowerCamelCase: int="\n" ) -> Any: __UpperCAmelCase : List[str] = [] __UpperCAmelCase : Dict = [] __UpperCAmelCase : Tuple = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(__lowerCamelCase ) > 0: words.append(bytearray(__lowerCamelCase ).decode("utf-8" , errors="replace" ) ) __UpperCAmelCase : int = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["emoji_inv"][word] ) elif word == "<SP>": words.append(" " ) elif word == "<BR>": words.append(__lowerCamelCase ) elif word == "<TAB>": words.append("\t" ) elif word == "<BLOCK>": words.append("▀" ) elif word == "<KIGOU>": words.append("ǀ" ) elif word == "<U2000U2BFF>": words.append("‖" ) else: words.append(__lowerCamelCase ) if len(__lowerCamelCase ) > 0: words.append(bytearray(__lowerCamelCase ).decode("utf-8" , errors="replace" ) ) __UpperCAmelCase : Any = "".join(__lowerCamelCase ) return text
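# Hedged round-trip sketch, assuming the tokenizer above corresponds to the
# upstream GPTNeoXJapaneseTokenizer named in its own error messages; the
# checkpoint id comes from the pretrained-vocab map at the top of the file.
from transformers import GPTNeoXJapaneseTokenizer

tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
ids = tokenizer("吾輩は猫である")["input_ids"]
print(tokenizer.decode(ids))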
from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=1e-1_2 ) -> str: __UpperCAmelCase : Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T __UpperCAmelCase : int = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T return jnp.matmul(snake_case__, norm_emb_a.T ) class _snake_case ( nn.Module ): lowerCamelCase__: CLIPConfig lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: Any ) -> Tuple: __UpperCAmelCase : List[str] = FlaxCLIPVisionModule(self.config.vision_config ) __UpperCAmelCase : Any = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype ) __UpperCAmelCase : int = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) ) __UpperCAmelCase : int = self.param( "special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) ) __UpperCAmelCase : Tuple = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) ) __UpperCAmelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) ) def __call__( self: List[Any] , __lowerCamelCase: Dict ) -> Dict: __UpperCAmelCase : Optional[int] = self.vision_model(__lowerCamelCase )[1] __UpperCAmelCase : List[str] = self.visual_projection(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds ) __UpperCAmelCase : Optional[Any] = jax_cosine_distance(__lowerCamelCase , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs __UpperCAmelCase : List[str] = 0.0 __UpperCAmelCase : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment __UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 ) __UpperCAmelCase : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase ) # Use a lower threshold if an image has any special care concept __UpperCAmelCase : List[Any] = is_special_care * 0.01 __UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment __UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 ) __UpperCAmelCase : Any = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class _snake_case ( _lowercase ): lowerCamelCase__: int = CLIPConfig lowerCamelCase__: Tuple = "clip_input" lowerCamelCase__: str = FlaxStableDiffusionSafetyCheckerModule def __init__( self: Union[str, Any] , __lowerCamelCase: CLIPConfig , __lowerCamelCase: Optional[Tuple] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: jnp.dtype = jnp.floataa , __lowerCamelCase: bool = True , **__lowerCamelCase: Optional[int] , ) -> int: if input_shape is None: __UpperCAmelCase : Dict = (1, 2_24, 2_24, 3) __UpperCAmelCase : Tuple = self.module_class(config=__lowerCamelCase , dtype=__lowerCamelCase , **__lowerCamelCase ) super().__init__(__lowerCamelCase , __lowerCamelCase , input_shape=__lowerCamelCase , seed=__lowerCamelCase , dtype=__lowerCamelCase , _do_init=_do_init ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: jax.random.KeyArray , __lowerCamelCase: Tuple , __lowerCamelCase: FrozenDict = None ) -> FrozenDict: # 
init input tensor __UpperCAmelCase : Tuple = jax.random.normal(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Dict = jax.random.split(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng} __UpperCAmelCase : str = self.module.init(__lowerCamelCase , __lowerCamelCase )["params"] return random_params def __call__( self: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: dict = None , ) -> List[Any]: __UpperCAmelCase : int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) ) return self.module.apply( {"params": params or self.params} , jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
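# A minimal sketch of the normalized cosine-similarity helper defined at the
# top of the module above: L2-normalize both embedding matrices row-wise (with
# a floored norm for numerical safety), then take the matrix product.
import jax.numpy as jnp

def cosine_similarity(emb_a, emb_b, eps=1e-12):
    a = emb_a / jnp.maximum(jnp.linalg.norm(emb_a, axis=1, keepdims=True), eps)
    b = emb_b / jnp.maximum(jnp.linalg.norm(emb_b, axis=1, keepdims=True), eps)
    return jnp.matmul(a, b.T)

print(cosine_similarity(jnp.ones((2, 4)), jnp.ones((3, 4))))  # (2, 3) matrix of ones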
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''adapter_layer''': '''encoder.layers.*.adapter_layer''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', '''pooling_layer.linear''': '''projector''', '''pooling_layer.projection''': '''classifier''', } _snake_case = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''projector''', '''classifier''', ] def _UpperCamelCase ( snake_case__ ) -> Dict: __UpperCAmelCase : Any = {} with open(snake_case__, "r" ) as file: for line_number, line in enumerate(snake_case__ ): __UpperCAmelCase : Any = line.strip() if line: __UpperCAmelCase : List[Any] = line.split() __UpperCAmelCase : int = line_number __UpperCAmelCase : Optional[int] = words[0] __UpperCAmelCase : Tuple = value return result def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) -> Tuple: for attribute in key.split("." ): __UpperCAmelCase : Optional[Any] = getattr(snake_case__, snake_case__ ) __UpperCAmelCase : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(snake_case__ ): __UpperCAmelCase : Union[str, Any] = PARAM_MAPPING[full_name.split("." )[-1]] __UpperCAmelCase : Dict = "param" if weight_type is not None and weight_type != "param": __UpperCAmelCase : Any = getattr(snake_case__, snake_case__ ).shape elif weight_type is not None and weight_type == "param": __UpperCAmelCase : int = hf_pointer for attribute in hf_param_name.split("." ): __UpperCAmelCase : Dict = getattr(snake_case__, snake_case__ ) __UpperCAmelCase : List[Any] = shape_pointer.shape # let's reduce dimension __UpperCAmelCase : Any = value[0] else: __UpperCAmelCase : Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": __UpperCAmelCase : Any = value elif weight_type == "weight_g": __UpperCAmelCase : List[str] = value elif weight_type == "weight_v": __UpperCAmelCase : List[Any] = value elif weight_type == "bias": __UpperCAmelCase : Union[str, Any] = value elif weight_type == "param": for attribute in hf_param_name.split("." ): __UpperCAmelCase : Optional[Any] = getattr(snake_case__, snake_case__ ) __UpperCAmelCase : Tuple = value else: __UpperCAmelCase : str = value logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) -> str: __UpperCAmelCase : Union[str, Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(snake_case__ ): __UpperCAmelCase : Union[str, Any] = PARAM_MAPPING[full_name.split("." )[-1]] __UpperCAmelCase : Any = "param" if weight_type is not None and weight_type != "param": __UpperCAmelCase : Optional[int] = ".".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __UpperCAmelCase : Dict = ".".join([key, hf_param_name] ) else: __UpperCAmelCase : List[Any] = key __UpperCAmelCase : List[str] = value if "lm_head" in full_key else value[0] _snake_case = { '''W_a''': '''linear_1.weight''', '''W_b''': '''linear_2.weight''', '''b_a''': '''linear_1.bias''', '''b_b''': '''linear_2.bias''', '''ln_W''': '''norm.weight''', '''ln_b''': '''norm.bias''', } def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=None, snake_case__=None ) -> int: __UpperCAmelCase : Tuple = False for key, mapped_key in MAPPING.items(): __UpperCAmelCase : Dict = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: __UpperCAmelCase : Optional[Any] = True if "*" in mapped_key: __UpperCAmelCase : List[str] = name.split(snake_case__ )[0].split("." 
)[-2] __UpperCAmelCase : Optional[Any] = mapped_key.replace("*", snake_case__ ) if "weight_g" in name: __UpperCAmelCase : Optional[int] = "weight_g" elif "weight_v" in name: __UpperCAmelCase : Union[str, Any] = "weight_v" elif "bias" in name: __UpperCAmelCase : Optional[int] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj __UpperCAmelCase : List[str] = "weight" else: __UpperCAmelCase : Optional[Any] = None if hf_dict is not None: rename_dict(snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) else: set_recursively(snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) return is_used return is_used def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> List[str]: __UpperCAmelCase : int = [] __UpperCAmelCase : Tuple = fairseq_model.state_dict() __UpperCAmelCase : str = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __UpperCAmelCase : Optional[int] = False if "conv_layers" in name: load_conv_layer( snake_case__, snake_case__, snake_case__, snake_case__, hf_model.config.feat_extract_norm == "group", ) __UpperCAmelCase : Tuple = True else: __UpperCAmelCase : Dict = load_wavaveca_layer(snake_case__, snake_case__, snake_case__ ) if not is_used: unused_weights.append(snake_case__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) -> List[str]: __UpperCAmelCase : Optional[int] = full_name.split("conv_layers." )[-1] __UpperCAmelCase : str = name.split("." ) __UpperCAmelCase : Dict = int(items[0] ) __UpperCAmelCase : Any = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __UpperCAmelCase : Union[str, Any] = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __UpperCAmelCase : int = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) __UpperCAmelCase : Tuple = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) __UpperCAmelCase : Tuple = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(snake_case__ ) @torch.no_grad() def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=None, snake_case__=None, snake_case__=True, snake_case__=False ) -> List[Any]: if config_path 
is not None: __UpperCAmelCase : Optional[int] = WavaVecaConfig.from_pretrained(snake_case__ ) else: __UpperCAmelCase : Optional[Any] = WavaVecaConfig() if is_seq_class: __UpperCAmelCase : List[Any] = read_txt_into_dict(snake_case__ ) __UpperCAmelCase : Optional[Any] = idalabel __UpperCAmelCase : List[Any] = WavaVecaForSequenceClassification(snake_case__ ) __UpperCAmelCase : Dict = WavaVecaFeatureExtractor( feature_size=1, sampling_rate=1_6000, padding_value=0, do_normalize=snake_case__, return_attention_mask=snake_case__, ) feature_extractor.save_pretrained(snake_case__ ) elif is_finetuned: if dict_path: __UpperCAmelCase : List[str] = Dictionary.load(snake_case__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __UpperCAmelCase : int = target_dict.pad_index __UpperCAmelCase : Union[str, Any] = target_dict.bos_index __UpperCAmelCase : Optional[int] = target_dict.eos_index __UpperCAmelCase : List[str] = len(target_dict.symbols ) __UpperCAmelCase : List[Any] = os.path.join(snake_case__, "vocab.json" ) if not os.path.isdir(snake_case__ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(snake_case__ ) ) return os.makedirs(snake_case__, exist_ok=snake_case__ ) __UpperCAmelCase : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched __UpperCAmelCase : str = 0 __UpperCAmelCase : Union[str, Any] = 1 with open(snake_case__, "w", encoding="utf-8" ) as vocab_handle: json.dump(snake_case__, snake_case__ ) __UpperCAmelCase : Optional[int] = WavaVecaCTCTokenizer( snake_case__, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=snake_case__, ) __UpperCAmelCase : List[Any] = True if config.feat_extract_norm == "layer" else False __UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor( feature_size=1, sampling_rate=1_6000, padding_value=0, do_normalize=snake_case__, return_attention_mask=snake_case__, ) __UpperCAmelCase : Optional[int] = WavaVecaProcessor(feature_extractor=snake_case__, tokenizer=snake_case__ ) processor.save_pretrained(snake_case__ ) __UpperCAmelCase : Optional[int] = WavaVecaForCTC(snake_case__ ) else: __UpperCAmelCase : Dict = WavaVecaForPreTraining(snake_case__ ) if is_finetuned or is_seq_class: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: __UpperCAmelCase : Any = argparse.Namespace(task="audio_pretraining" ) __UpperCAmelCase : str = fairseq.tasks.setup_task(snake_case__ ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=snake_case__ ) __UpperCAmelCase : int = model[0].eval() recursively_load_weights(snake_case__, snake_case__, not is_finetuned ) hf_wavavec.save_pretrained(snake_case__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') 
parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) parser.add_argument( '''--is_seq_class''', action='''store_true''', help='''Whether the model to convert is a fine-tuned sequence classification model or not''', ) _snake_case = parser.parse_args() _snake_case = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
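# Hedged invocation sketch for the conversion script above; the flag names come
# from its argparse definitions, while the script filename and all paths are
# placeholders.
import subprocess

subprocess.run(
    [
        "python", "convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py",  # placeholder filename
        "--checkpoint_path", "/path/to/wav2vec_small_960h.pt",
        "--dict_path", "/path/to/dict.ltr.txt",
        "--pytorch_dump_folder_path", "./wav2vec2-converted",
    ],
    check=True,
)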
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def _UpperCamelCase ( snake_case__ ) -> Tuple: __UpperCAmelCase : Union[str, Any] = 384 if "tiny" in model_name: __UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3] __UpperCAmelCase : List[Any] = [96, 192, 384, 768] if "small" in model_name: __UpperCAmelCase : Tuple = [3, 3, 27, 3] __UpperCAmelCase : Any = [96, 192, 384, 768] if "base" in model_name: __UpperCAmelCase : str = [3, 3, 27, 3] __UpperCAmelCase : str = [128, 256, 512, 1024] __UpperCAmelCase : str = 512 if "large" in model_name: __UpperCAmelCase : Dict = [3, 3, 27, 3] __UpperCAmelCase : int = [192, 384, 768, 1536] __UpperCAmelCase : Dict = 768 if "xlarge" in model_name: __UpperCAmelCase : List[Any] = [3, 3, 27, 3] __UpperCAmelCase : Tuple = [256, 512, 1024, 2048] __UpperCAmelCase : int = 1024 # set label information __UpperCAmelCase : List[Any] = 150 __UpperCAmelCase : str = "huggingface/label-files" __UpperCAmelCase : List[Any] = "ade20k-id2label.json" __UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) ) __UpperCAmelCase : str = {int(snake_case__ ): v for k, v in idalabel.items()} __UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} __UpperCAmelCase : int = ConvNextConfig( depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] ) __UpperCAmelCase : int = UperNetConfig( backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, ) return config def _UpperCamelCase ( snake_case__ ) -> Tuple: __UpperCAmelCase : Optional[int] = [] # fmt: off # stem rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") ) rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") ) rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") ) rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', 
f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any: __UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ ) __UpperCAmelCase : Optional[int] = val def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]: __UpperCAmelCase : Dict = { "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth", "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth", "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth", "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth", "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth", } __UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name] __UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"] __UpperCAmelCase : Dict = get_upernet_config(snake_case__ ) __UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): __UpperCAmelCase : str = state_dict.pop(snake_case__ ) if "bn" in key: __UpperCAmelCase : int = key.replace("bn", "batch_norm" ) __UpperCAmelCase : Union[str, Any] = val # rename keys __UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__, snake_case__, snake_case__ ) model.load_state_dict(snake_case__ ) # verify on image __UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" __UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" ) 
__UpperCAmelCase : str = SegformerImageProcessor() __UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values with torch.no_grad(): __UpperCAmelCase : Union[str, Any] = model(snake_case__ ) if model_name == "upernet-convnext-tiny": __UpperCAmelCase : Any = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": __UpperCAmelCase : Optional[Any] = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": __UpperCAmelCase : Dict = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": __UpperCAmelCase : Tuple = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": __UpperCAmelCase : Union[str, Any] = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print("Logits:", outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case__ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(snake_case__ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-convnext-tiny''', type=str, choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']], help='''Name of the ConvNext UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _snake_case = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
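# Minimal sketch of the state-dict key-renaming pattern the conversion above
# relies on: pop each tensor out of the dict and reinsert it under the new name.
import torch

def rename_key(dct, old, new):
    dct[new] = dct.pop(old)

state_dict = {"backbone.norm0.weight": torch.zeros(3)}
rename_key(state_dict, "backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")
print(list(state_dict))  # ['backbone.hidden_states_norms.stage1.weight']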
from ..utils import DummyObject, requires_backends class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: int = ["sentencepiece"] def __init__( self: Dict , *__lowerCamelCase: Optional[int] , **__lowerCamelCase: Union[str, Any] ) -> List[str]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: str = ["sentencepiece"] def __init__( self: int , *__lowerCamelCase: Optional[int] , **__lowerCamelCase: Tuple ) -> List[str]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: int = ["sentencepiece"] def __init__( self: Union[str, Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: Any ) -> Any: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Any = ["sentencepiece"] def __init__( self: Union[str, Any] , *__lowerCamelCase: Any , **__lowerCamelCase: Optional[Any] ) -> int: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Union[str, Any] = ["sentencepiece"] def __init__( self: Dict , *__lowerCamelCase: List[str] , **__lowerCamelCase: Optional[Any] ) -> Union[str, Any]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Optional[Any] = ["sentencepiece"] def __init__( self: Tuple , *__lowerCamelCase: Optional[Any] , **__lowerCamelCase: Tuple ) -> Optional[int]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: List[str] = ["sentencepiece"] def __init__( self: Any , *__lowerCamelCase: Optional[Any] , **__lowerCamelCase: Union[str, Any] ) -> Dict: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Tuple = ["sentencepiece"] def __init__( self: int , *__lowerCamelCase: str , **__lowerCamelCase: Any ) -> Any: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: str = ["sentencepiece"] def __init__( self: Tuple , *__lowerCamelCase: str , **__lowerCamelCase: Tuple ) -> List[str]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Dict = ["sentencepiece"] def __init__( self: List[str] , *__lowerCamelCase: Any , **__lowerCamelCase: int ) -> List[Any]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Union[str, Any] = ["sentencepiece"] def __init__( self: Optional[Any] , *__lowerCamelCase: Union[str, Any] , **__lowerCamelCase: List[Any] ) -> str: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: str = ["sentencepiece"] def __init__( self: int , *__lowerCamelCase: List[Any] , **__lowerCamelCase: List[Any] ) -> List[Any]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: List[str] = ["sentencepiece"] def __init__( self: Dict , *__lowerCamelCase: List[str] , **__lowerCamelCase: List[str] ) -> Optional[Any]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: int = ["sentencepiece"] def __init__( self: Optional[Any] , *__lowerCamelCase: Optional[int] , **__lowerCamelCase: Tuple ) -> Optional[int]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Union[str, Any] = ["sentencepiece"] def __init__( self: Optional[Any] , 
*__lowerCamelCase: Optional[Any] , **__lowerCamelCase: Tuple ) -> Optional[Any]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Union[str, Any] = ["sentencepiece"] def __init__( self: str , *__lowerCamelCase: Dict , **__lowerCamelCase: List[Any] ) -> Union[str, Any]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Tuple = ["sentencepiece"] def __init__( self: Optional[Any] , *__lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Tuple ) -> Tuple: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Any = ["sentencepiece"] def __init__( self: List[str] , *__lowerCamelCase: int , **__lowerCamelCase: Dict ) -> str: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: List[str] = ["sentencepiece"] def __init__( self: Tuple , *__lowerCamelCase: List[Any] , **__lowerCamelCase: Dict ) -> int: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: List[str] = ["sentencepiece"] def __init__( self: Any , *__lowerCamelCase: List[Any] , **__lowerCamelCase: int ) -> Tuple: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Union[str, Any] = ["sentencepiece"] def __init__( self: Optional[int] , *__lowerCamelCase: Union[str, Any] , **__lowerCamelCase: List[str] ) -> Union[str, Any]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Union[str, Any] = ["sentencepiece"] def __init__( self: Union[str, Any] , *__lowerCamelCase: Dict , **__lowerCamelCase: Optional[int] ) -> List[Any]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Optional[int] = ["sentencepiece"] def __init__( self: str , *__lowerCamelCase: str , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: str = ["sentencepiece"] def __init__( self: Any , *__lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> List[Any]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Tuple = ["sentencepiece"] def __init__( self: int , *__lowerCamelCase: List[str] , **__lowerCamelCase: List[str] ) -> Any: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Tuple = ["sentencepiece"] def __init__( self: Tuple , *__lowerCamelCase: Optional[Any] , **__lowerCamelCase: Tuple ) -> List[str]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: int = ["sentencepiece"] def __init__( self: List[str] , *__lowerCamelCase: Tuple , **__lowerCamelCase: str ) -> str: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: List[str] = ["sentencepiece"] def __init__( self: Union[str, Any] , *__lowerCamelCase: int , **__lowerCamelCase: Dict ) -> str: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: Optional[int] = ["sentencepiece"] def __init__( self: List[Any] , *__lowerCamelCase: Tuple , **__lowerCamelCase: Optional[Any] ) -> List[Any]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): 
lowerCamelCase__: int = ["sentencepiece"] def __init__( self: List[str] , *__lowerCamelCase: Any , **__lowerCamelCase: Optional[Any] ) -> Optional[Any]: requires_backends(self , ["sentencepiece"] ) class _snake_case ( metaclass=_lowercase ): lowerCamelCase__: List[Any] = ["sentencepiece"] def __init__( self: int , *__lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[int] ) -> Any: requires_backends(self , ["sentencepiece"] )
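# Hedged sketch of the dummy-object pattern repeated above: each placeholder
# class raises an informative ImportError on construction when the
# "sentencepiece" backend is missing (construction simply succeeds when it is
# installed). The class name below is illustrative.
from transformers.utils import DummyObject, requires_backends

class _Placeholder(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

try:
    _Placeholder()
except ImportError as err:
    print(err)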
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''', } class _snake_case ( _lowercase ): lowerCamelCase__: Dict = "roc_bert" def __init__( self: int , __lowerCamelCase: Union[str, Any]=3_05_22 , __lowerCamelCase: int=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: int=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Any=5_12 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: str=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]="absolute" , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=9_10 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=2_48_58 , __lowerCamelCase: Optional[int]=True , **__lowerCamelCase: Any , ) -> List[Any]: __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Dict = max_position_embeddings __UpperCAmelCase : Optional[Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[str] = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : Optional[int] = attention_probs_dropout_prob __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : Optional[Any] = type_vocab_size __UpperCAmelCase : List[Any] = layer_norm_eps __UpperCAmelCase : Optional[int] = use_cache __UpperCAmelCase : Optional[Any] = enable_pronunciation __UpperCAmelCase : Any = enable_shape __UpperCAmelCase : Union[str, Any] = pronunciation_embed_dim __UpperCAmelCase : Optional[Any] = pronunciation_vocab_size __UpperCAmelCase : Optional[Any] = shape_embed_dim __UpperCAmelCase : List[Any] = shape_vocab_size __UpperCAmelCase : int = concat_input __UpperCAmelCase : int = position_embedding_type __UpperCAmelCase : Optional[int] = classifier_dropout super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
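# Hedged usage sketch, assuming the class above corresponds to the upstream
# RoCBertConfig (model_type "roc_bert").
from transformers import RoCBertConfig

config = RoCBertConfig()
# RoC-BERT augments token embeddings with pronunciation and glyph-shape channels:
print(config.enable_pronunciation, config.enable_shape, config.concat_input)
print(config.pronunciation_vocab_size, config.shape_vocab_size)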
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''} class _snake_case ( _lowercase ): lowerCamelCase__: str = "ctrl" lowerCamelCase__: Dict = ["past_key_values"] lowerCamelCase__: Optional[Any] = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self: Tuple , __lowerCamelCase: str=24_65_34 , __lowerCamelCase: Union[str, Any]=2_56 , __lowerCamelCase: str=12_80 , __lowerCamelCase: int=81_92 , __lowerCamelCase: str=48 , __lowerCamelCase: List[str]=16 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Dict=1e-6 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: List[str]=True , **__lowerCamelCase: List[str] , ) -> Dict: __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : int = n_positions __UpperCAmelCase : Optional[int] = n_embd __UpperCAmelCase : int = n_layer __UpperCAmelCase : Tuple = n_head __UpperCAmelCase : Optional[Any] = dff __UpperCAmelCase : List[Any] = resid_pdrop __UpperCAmelCase : Dict = embd_pdrop __UpperCAmelCase : Any = layer_norm_epsilon __UpperCAmelCase : Optional[int] = initializer_range __UpperCAmelCase : Dict = use_cache super().__init__(**__lowerCamelCase )
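# Usage sketch: the "ctrl" config above is exposed publicly as CTRLConfig, and the
# attribute_map shown makes BERT-style names resolve to the CTRL-native fields.
from transformers import CTRLConfig

ctrl_config = CTRLConfig(n_layer=2, n_head=4)          # shrunk for a quick smoke test
print(ctrl_config.hidden_size == ctrl_config.n_embd)   # True, via the attribute map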
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def _UpperCamelCase ( snake_case__ ) -> int: __UpperCAmelCase : int = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: __UpperCAmelCase : int = [144, 192, 240] __UpperCAmelCase : Optional[Any] = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: __UpperCAmelCase : Optional[Any] = [96, 120, 144] __UpperCAmelCase : Tuple = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: __UpperCAmelCase : str = [64, 80, 96] __UpperCAmelCase : Optional[Any] = [16, 16, 24, 48, 64, 80, 320] __UpperCAmelCase : Tuple = 0.05 __UpperCAmelCase : Dict = 2.0 if mobilevit_name.startswith("deeplabv3_" ): __UpperCAmelCase : str = 512 __UpperCAmelCase : Any = 16 __UpperCAmelCase : str = 21 __UpperCAmelCase : Union[str, Any] = "pascal-voc-id2label.json" else: __UpperCAmelCase : Optional[Any] = 1000 __UpperCAmelCase : int = "imagenet-1k-id2label.json" __UpperCAmelCase : Dict = "huggingface/label-files" __UpperCAmelCase : int = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) ) __UpperCAmelCase : Any = {int(snake_case__ ): v for k, v in idalabel.items()} __UpperCAmelCase : int = idalabel __UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()} return config def _UpperCamelCase ( snake_case__, snake_case__=False ) -> Tuple: for i in range(1, 6 ): if f'''layer_{i}.''' in name: __UpperCAmelCase : Tuple = name.replace(f'''layer_{i}.''', f'''encoder.layer.{i - 1}.''' ) if "conv_1." in name: __UpperCAmelCase : Dict = name.replace("conv_1.", "conv_stem." ) if ".block." in name: __UpperCAmelCase : Optional[int] = name.replace(".block.", "." ) if "exp_1x1" in name: __UpperCAmelCase : Tuple = name.replace("exp_1x1", "expand_1x1" ) if "red_1x1" in name: __UpperCAmelCase : Optional[Any] = name.replace("red_1x1", "reduce_1x1" ) if ".local_rep.conv_3x3." in name: __UpperCAmelCase : Optional[int] = name.replace(".local_rep.conv_3x3.", ".conv_kxk." ) if ".local_rep.conv_1x1." in name: __UpperCAmelCase : Any = name.replace(".local_rep.conv_1x1.", ".conv_1x1." ) if ".norm." in name: __UpperCAmelCase : Dict = name.replace(".norm.", ".normalization." ) if ".conv." in name: __UpperCAmelCase : List[Any] = name.replace(".conv.", ".convolution." ) if ".conv_proj." in name: __UpperCAmelCase : List[str] = name.replace(".conv_proj.", ".conv_projection." 
) for i in range(0, 2 ): for j in range(0, 4 ): if f'''.{i}.{j}.''' in name: __UpperCAmelCase : List[Any] = name.replace(f'''.{i}.{j}.''', f'''.{i}.layer.{j}.''' ) for i in range(2, 6 ): for j in range(0, 4 ): if f'''.{i}.{j}.''' in name: __UpperCAmelCase : Any = name.replace(f'''.{i}.{j}.''', f'''.{i}.''' ) if "expand_1x1" in name: __UpperCAmelCase : Optional[int] = name.replace("expand_1x1", "downsampling_layer.expand_1x1" ) if "conv_3x3" in name: __UpperCAmelCase : List[Any] = name.replace("conv_3x3", "downsampling_layer.conv_3x3" ) if "reduce_1x1" in name: __UpperCAmelCase : Dict = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1" ) for i in range(2, 5 ): if f'''.global_rep.{i}.weight''' in name: __UpperCAmelCase : Any = name.replace(f'''.global_rep.{i}.weight''', ".layernorm.weight" ) if f'''.global_rep.{i}.bias''' in name: __UpperCAmelCase : Optional[Any] = name.replace(f'''.global_rep.{i}.bias''', ".layernorm.bias" ) if ".global_rep." in name: __UpperCAmelCase : Tuple = name.replace(".global_rep.", ".transformer." ) if ".pre_norm_mha.0." in name: __UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_mha.0.", ".layernorm_before." ) if ".pre_norm_mha.1.out_proj." in name: __UpperCAmelCase : Tuple = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense." ) if ".pre_norm_ffn.0." in name: __UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_ffn.0.", ".layernorm_after." ) if ".pre_norm_ffn.1." in name: __UpperCAmelCase : Dict = name.replace(".pre_norm_ffn.1.", ".intermediate.dense." ) if ".pre_norm_ffn.4." in name: __UpperCAmelCase : int = name.replace(".pre_norm_ffn.4.", ".output.dense." ) if ".transformer." in name: __UpperCAmelCase : Tuple = name.replace(".transformer.", ".transformer.layer." ) if ".aspp_layer." in name: __UpperCAmelCase : Any = name.replace(".aspp_layer.", "." ) if ".aspp_pool." in name: __UpperCAmelCase : Optional[Any] = name.replace(".aspp_pool.", "." ) if "seg_head." in name: __UpperCAmelCase : Optional[int] = name.replace("seg_head.", "segmentation_head." ) if "segmentation_head.classifier.classifier." in name: __UpperCAmelCase : str = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier." ) if "classifier.fc." in name: __UpperCAmelCase : Optional[Any] = name.replace("classifier.fc.", "classifier." ) elif (not base_model) and ("segmentation_head." not in name): __UpperCAmelCase : List[str] = "mobilevit." + name return name def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=False ) -> Union[str, Any]: if base_model: __UpperCAmelCase : Optional[int] = "" else: __UpperCAmelCase : Tuple = "mobilevit." for key in orig_state_dict.copy().keys(): __UpperCAmelCase : Optional[int] = orig_state_dict.pop(snake_case__ ) if key[:8] == "encoder.": __UpperCAmelCase : str = key[8:] if "qkv" in key: __UpperCAmelCase : Tuple = key.split("." 
) __UpperCAmelCase : List[Any] = int(key_split[0][6:] ) - 1 __UpperCAmelCase : Optional[Any] = int(key_split[3] ) __UpperCAmelCase : Tuple = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' ) __UpperCAmelCase : List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size __UpperCAmelCase : Optional[Any] = ( f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.''' ) if "weight" in key: __UpperCAmelCase : Any = val[:dim, :] __UpperCAmelCase : Any = val[dim : dim * 2, :] __UpperCAmelCase : List[Any] = val[-dim:, :] else: __UpperCAmelCase : List[str] = val[:dim] __UpperCAmelCase : Optional[Any] = val[dim : dim * 2] __UpperCAmelCase : List[Any] = val[-dim:] else: __UpperCAmelCase : str = val return orig_state_dict def _UpperCamelCase ( ) -> Any: __UpperCAmelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" __UpperCAmelCase : List[str] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ) return im @torch.no_grad() def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=False ) -> Optional[Any]: __UpperCAmelCase : Tuple = get_mobilevit_config(snake_case__ ) # load original state_dict __UpperCAmelCase : str = torch.load(snake_case__, map_location="cpu" ) # load 🤗 model if mobilevit_name.startswith("deeplabv3_" ): __UpperCAmelCase : Optional[int] = MobileViTForSemanticSegmentation(snake_case__ ).eval() else: __UpperCAmelCase : List[Any] = MobileViTForImageClassification(snake_case__ ).eval() __UpperCAmelCase : Dict = convert_state_dict(snake_case__, snake_case__ ) model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor __UpperCAmelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 ) __UpperCAmelCase : Any = image_processor(images=prepare_img(), return_tensors="pt" ) __UpperCAmelCase : Dict = model(**snake_case__ ) __UpperCAmelCase : Tuple = outputs.logits if mobilevit_name.startswith("deeplabv3_" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": __UpperCAmelCase : int = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": __UpperCAmelCase : Tuple = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": __UpperCAmelCase : Any = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ] ) else: raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3, :3, :3], snake_case__, atol=1e-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": __UpperCAmelCase : str = torch.tensor([-0.9866, 0.2392, -1.1241] ) elif mobilevit_name == "mobilevit_xs": __UpperCAmelCase : Tuple = torch.tensor([-2.4761, 
-0.9399, -1.9587] ) elif mobilevit_name == "mobilevit_xxs": __UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ) else: raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3], snake_case__, atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case__ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case__ ) if push_to_hub: __UpperCAmelCase : List[str] = { "mobilevit_s": "mobilevit-small", "mobilevit_xs": "mobilevit-x-small", "mobilevit_xxs": "mobilevit-xx-small", "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small", "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small", "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small", } print("Pushing to the hub..." ) __UpperCAmelCase : int = model_mapping[mobilevit_name] image_processor.push_to_hub(snake_case__, organization="apple" ) model.push_to_hub(snake_case__, organization="apple" ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--mobilevit_name''', default='''mobilevit_s''', type=str, help=( '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',''' ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.''' ), ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _snake_case = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
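# Example invocation of the conversion script above (the script filename and the
# local paths are placeholders; the checkpoint must be an original Apple MobileViT
# .pt state dict, and the flags match the argparse definition directly above):
#
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small \
#       --push_to_hub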
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class _snake_case : def __init__( self: str , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: str=13 , __lowerCamelCase: Optional[int]=10 , __lowerCamelCase: int=3 , __lowerCamelCase: List[str]=2 , __lowerCamelCase: Optional[int]=2 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: str=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: List[str]=37 , __lowerCamelCase: int="gelu" , __lowerCamelCase: List[Any]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: str=10 , __lowerCamelCase: Tuple=0.02 , __lowerCamelCase: Any="divided_space_time" , __lowerCamelCase: int=None , ) -> Optional[Any]: __UpperCAmelCase : Any = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : str = image_size __UpperCAmelCase : List[Any] = num_channels __UpperCAmelCase : str = patch_size __UpperCAmelCase : List[Any] = num_frames __UpperCAmelCase : List[str] = is_training __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : List[Any] = hidden_size __UpperCAmelCase : Tuple = num_hidden_layers __UpperCAmelCase : Any = num_attention_heads __UpperCAmelCase : Tuple = intermediate_size __UpperCAmelCase : int = hidden_act __UpperCAmelCase : Tuple = hidden_dropout_prob __UpperCAmelCase : Tuple = attention_probs_dropout_prob __UpperCAmelCase : Dict = attention_type __UpperCAmelCase : Dict = initializer_range __UpperCAmelCase : Union[str, Any] = scope __UpperCAmelCase : Any = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __UpperCAmelCase : Optional[int] = (image_size // patch_size) ** 2 __UpperCAmelCase : str = (num_frames) * self.num_patches_per_frame + 1 def _lowerCamelCase ( self: int ) -> Dict: __UpperCAmelCase : Any = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : str = None if self.use_labels: __UpperCAmelCase : int = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase : List[str] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self: Optional[int] ) -> Union[str, Any]: __UpperCAmelCase : List[str] = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __UpperCAmelCase : Any = self.num_labels return config def _lowerCamelCase ( self: int , __lowerCamelCase: str , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Dict: __UpperCAmelCase : int = TimesformerModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Any = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict ) -> Any: __UpperCAmelCase : str = TimesformerForVideoClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Tuple = model(__lowerCamelCase ) # verify the logits shape __UpperCAmelCase : Optional[Any] = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __lowerCamelCase ) def _lowerCamelCase ( self: Optional[int] ) -> Optional[int]: __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = config_and_inputs __UpperCAmelCase : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _snake_case ( _lowercase , _lowercase , unittest.TestCase ): lowerCamelCase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowerCamelCase__: Dict = ( {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification} if is_torch_available() else {} ) lowerCamelCase__: Optional[int] = False lowerCamelCase__: Optional[Any] = False lowerCamelCase__: List[str] = False lowerCamelCase__: Optional[int] = False def _lowerCamelCase ( self: Optional[Any] ) -> Tuple: __UpperCAmelCase : Optional[Any] = TimesformerModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester( self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] , __lowerCamelCase: int=False ) -> Tuple: __UpperCAmelCase : List[Any] = copy.deepcopy(__lowerCamelCase ) if return_labels: if model_class in get_values(__lowerCamelCase ): __UpperCAmelCase : Optional[int] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def _lowerCamelCase ( self: List[str] ) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason="TimeSformer does not use inputs_embeds" ) def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]: pass def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]: __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Any = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCAmelCase : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def _lowerCamelCase ( self: int ) -> List[Any]: __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase ) __UpperCAmelCase : Dict = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : int = [*signature.parameters.keys()] __UpperCAmelCase : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]: __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _lowerCamelCase ( self: Optional[int] ) -> int: __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__lowerCamelCase ) @slow def _lowerCamelCase ( self: str ) -> Union[str, Any]: for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Dict = TimesformerModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def _lowerCamelCase ( self: Dict ) -> Any: if not self.has_attentions: pass else: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Dict = True for model_class in self.all_model_classes: __UpperCAmelCase : List[Any] = self.model_tester.seq_length __UpperCAmelCase : Union[str, Any] = self.model_tester.num_frames __UpperCAmelCase : List[Any] = True __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : str = True __UpperCAmelCase : Tuple = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : List[str] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) __UpperCAmelCase : int = outputs.attentions self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __UpperCAmelCase : int = True __UpperCAmelCase : Optional[int] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) __UpperCAmelCase : Tuple = outputs.attentions self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __UpperCAmelCase : List[Any] = len(__lowerCamelCase ) # Check attention is always last and order is fine __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : Tuple = True __UpperCAmelCase : Union[str, Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(out_len + 1 , len(__lowerCamelCase ) ) __UpperCAmelCase : Optional[int] = outputs.attentions self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def _lowerCamelCase ( self: Optional[Any] ) -> Any: def check_hidden_states_output(__lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] , 
__lowerCamelCase: str ): __UpperCAmelCase : Optional[int] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) __UpperCAmelCase : Union[str, Any] = outputs.hidden_states __UpperCAmelCase : Optional[Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) __UpperCAmelCase : int = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : int = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Dict = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _UpperCamelCase ( ) -> Dict: __UpperCAmelCase : str = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) __UpperCAmelCase : Dict = np.load(snake_case__ ) return list(snake_case__ ) @require_torch @require_vision class _snake_case ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self: List[str] ) -> Optional[Any]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _lowerCamelCase ( self: Any ) -> List[str]: __UpperCAmelCase : int = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to( __lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.default_image_processor __UpperCAmelCase : Optional[Any] = prepare_video() __UpperCAmelCase : Any = image_processor(video[:8] , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): __UpperCAmelCase : Dict = model(**__lowerCamelCase ) # verify the logits __UpperCAmelCase : int = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) __UpperCAmelCase : Dict = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
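# The suite above is driven by pytest; a typical targeted run, assuming the
# conventional transformers test layout (the exact path is an assumption):
#
#   pytest tests/models/timesformer/test_modeling_timesformer.py -k attention
#
# The @slow integration test (the Kinetics-400 logits check) additionally requires
# RUN_SLOW=1 and network access to fetch the checkpoint and the sample video.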
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picks: int = 20) -> str:
    """Expected number of distinct colours among num_picks balls drawn from NUM_BALLS."""
    total = math.comb(NUM_BALLS, num_picks)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picks)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
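# Why the closed form works (linearity of expectation): each of the NUM_COLOURS
# colours is absent from a draw of num_picks balls with probability
#   C(NUM_BALLS - BALLS_PER_COLOUR, num_picks) / C(NUM_BALLS, num_picks),
# i.e. C(60, 20) / C(70, 20) for the defaults, so the expected count of distinct
# colours is NUM_COLOURS * (1 - C(60, 20) / C(70, 20)), which solution(20)
# evaluates numerically.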
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class _snake_case ( _lowercase ): lowerCamelCase__: Union[str, Any] = "new-model" if is_tf_available(): class _snake_case ( _lowercase ): lowerCamelCase__: List[str] = NewModelConfig @require_tf class _snake_case ( unittest.TestCase ): @slow def _lowerCamelCase ( self: int ) -> str: __UpperCAmelCase : int = "bert-base-cased" __UpperCAmelCase : str = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = TFAutoModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _lowerCamelCase ( self: str ) -> List[Any]: __UpperCAmelCase : int = "bert-base-cased" __UpperCAmelCase : Dict = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _lowerCamelCase ( self: Dict ) -> Optional[Any]: for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Dict = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(__lowerCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Dict = TFAutoModelForCausalLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _lowerCamelCase ( self: Optional[int] ) -> List[Any]: for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: 
__UpperCAmelCase : List[str] = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Dict = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _lowerCamelCase ( self: Union[str, Any] ) -> Dict: for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Dict = TFAutoModelForMaskedLM.from_pretrained(__lowerCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]: for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : str = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Any = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]: # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __UpperCAmelCase : str = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Tuple = TFAutoModelForSequenceClassification.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _lowerCamelCase ( self: List[str] ) -> List[Any]: # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __UpperCAmelCase : Dict = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Dict = TFAutoModelForQuestionAnswering.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow @require_tensorflow_probability def _lowerCamelCase ( self: str ) -> str: for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: __UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(__lowerCamelCase ) __UpperCAmelCase , __UpperCAmelCase : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained( __lowerCamelCase , output_loading_info=__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def _lowerCamelCase ( self: Optional[int] ) -> Union[str, Any]: __UpperCAmelCase : Any = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase ) 
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__lowerCamelCase ) , 1_44_10 ) def _lowerCamelCase ( self: Optional[Any] ) -> Any: __UpperCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__lowerCamelCase ) , 1_44_10 ) def _lowerCamelCase ( self: Any ) -> str: # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel __UpperCAmelCase : int = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : str = copy.deepcopy(model.config ) __UpperCAmelCase : str = ["FunnelBaseModel"] __UpperCAmelCase : List[str] = TFAutoModel.from_config(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__lowerCamelCase ) __UpperCAmelCase : List[Any] = TFAutoModel.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def _lowerCamelCase ( self: Tuple ) -> Dict: try: AutoConfig.register("new-model" , __lowerCamelCase ) __UpperCAmelCase : List[Any] = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(__lowerCamelCase ): auto_class.register(__lowerCamelCase , __lowerCamelCase ) auto_class.register(__lowerCamelCase , __lowerCamelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCamelCase ): auto_class.register(__lowerCamelCase , __lowerCamelCase ) # Now that the config is registered, it can be used as any other config with the auto-API __UpperCAmelCase : int = BertModelTester(self ).get_config() __UpperCAmelCase : Tuple = NewModelConfig(**tiny_config.to_dict() ) __UpperCAmelCase : Optional[int] = auto_class.from_config(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__lowerCamelCase ) __UpperCAmelCase : Any = auto_class.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def _lowerCamelCase ( self: Any ) -> List[Any]: with self.assertRaisesRegex( __lowerCamelCase , "bert-base is not a local folder and is not a valid model identifier" ): __UpperCAmelCase : Tuple = TFAutoModel.from_pretrained("bert-base" ) def _lowerCamelCase ( self: List[str] ) -> Optional[Any]: with self.assertRaisesRegex( __lowerCamelCase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): __UpperCAmelCase : List[str] = 
TFAutoModel.from_pretrained(__lowerCamelCase , revision="aaaaaa" ) def _lowerCamelCase ( self: List[Any] ) -> Any: with self.assertRaisesRegex( __lowerCamelCase , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ): __UpperCAmelCase : Optional[int] = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def _lowerCamelCase ( self: List[str] ) -> List[Any]: with self.assertRaisesRegex(__lowerCamelCase , "Use `from_pt=True` to load this model" ): __UpperCAmelCase : Union[str, Any] = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" ) def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]: # Make sure we have cached the model. __UpperCAmelCase : int = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" ) with RequestCounter() as counter: __UpperCAmelCase : Optional[int] = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint __UpperCAmelCase : str = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" ) with RequestCounter() as counter: __UpperCAmelCase : Tuple = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
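# Behaviour pinned down by the cache tests directly above: once a checkpoint is
# cached, a second from_pretrained call should cost exactly one HEAD request (the
# freshness check) and zero GET or other requests, for both plain and sharded
# checkpoints.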
def longest_distance(graph: dict[int, list[int]]) -> None:
    """Print the number of vertices on a longest path in a DAG (Kahn's algorithm)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
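# This is Kahn's topological sort with a running longest-path count, O(V + E) time.
# long_dist counts vertices on the longest path ending at each node; for the sample
# DAG, 0 -> 2 -> 5 -> 6 -> 7 has five vertices, so the script prints 5.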
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class _snake_case ( _lowercase ): def __init__( self: Optional[Any] , __lowerCamelCase: Any=0.01 , __lowerCamelCase: str=10_00 ) -> Union[str, Any]: __UpperCAmelCase : Union[str, Any] = p_stop __UpperCAmelCase : List[Any] = max_length def __iter__( self: Dict ) -> Optional[Any]: __UpperCAmelCase : str = 0 __UpperCAmelCase : Tuple = False while not stop and count < self.max_length: yield count count += 1 __UpperCAmelCase : Dict = random.random() < self.p_stop class _snake_case ( unittest.TestCase ): def _lowerCamelCase ( self: str , __lowerCamelCase: Any , __lowerCamelCase: str , __lowerCamelCase: Dict=False , __lowerCamelCase: List[Any]=True ) -> List[Any]: __UpperCAmelCase : Optional[Any] = [ BatchSamplerShard(__lowerCamelCase , 2 , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase ) for i in range(2 ) ] __UpperCAmelCase : List[Any] = [list(__lowerCamelCase ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(__lowerCamelCase ) for shard in batch_sampler_shards] , [len(__lowerCamelCase ) for e in expected] ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def _lowerCamelCase ( self: int ) -> int: # Check the shards when the dataset is a round multiple of total batch size. __UpperCAmelCase : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : List[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : int = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCamelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __UpperCAmelCase : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : int = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Dict = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : int = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. 
__UpperCAmelCase : str = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : List[str] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Dict = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __UpperCAmelCase : Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Tuple = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Tuple = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase ) # Check the shards when the dataset is very small. __UpperCAmelCase : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Tuple = [[], []] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase ) def _lowerCamelCase ( self: Optional[int] ) -> List[Any]: # Check the shards when the dataset is a round multiple of batch size. __UpperCAmelCase : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase ) __UpperCAmelCase : Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size. __UpperCAmelCase : str = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Any = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : int = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. 
__UpperCAmelCase : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Optional[int] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase ) __UpperCAmelCase : int = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Any = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase ) # Check the shards when the dataset is very small. __UpperCAmelCase : Dict = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : int = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase ) __UpperCAmelCase : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Any = [[], []] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase ) def _lowerCamelCase ( self: List[Any] ) -> Optional[int]: # Check the shards when the dataset is a round multiple of total batch size. __UpperCAmelCase : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : List[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase ) __UpperCAmelCase : int = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCamelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __UpperCAmelCase : Any = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : List[str] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase ) __UpperCAmelCase : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : List[str] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __UpperCAmelCase : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Any = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase ) __UpperCAmelCase : Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Tuple = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
__UpperCAmelCase : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Tuple = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Optional[int] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase ) # Check the shards when the dataset is very small. __UpperCAmelCase : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Dict = [[[0, 1]], []] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase ) __UpperCAmelCase : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Optional[int] = [[], []] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase ) def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]: # Check the shards when the dataset is a round multiple of batch size. __UpperCAmelCase : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : List[str] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size. __UpperCAmelCase : str = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase ) __UpperCAmelCase : Tuple = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Dict = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. 
__UpperCAmelCase : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : Optional[int] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase ) __UpperCAmelCase : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : List[str] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase ) # Check the shards when the dataset is very small. __UpperCAmelCase : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : str = [[[0, 1]], []] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase ) __UpperCAmelCase : List[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : str = [[], []] self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase ) def _lowerCamelCase ( self: Dict ) -> Optional[int]: __UpperCAmelCase : int = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] __UpperCAmelCase : Union[str, Any] = [BatchSamplerShard(__lowerCamelCase , 2 , __lowerCamelCase , even_batches=__lowerCamelCase ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: List[Any]=False , __lowerCamelCase: List[Any]=2 , __lowerCamelCase: Optional[int]=False ) -> int: random.seed(__lowerCamelCase ) __UpperCAmelCase : Tuple = list(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = [ IterableDatasetShard( __lowerCamelCase , batch_size=__lowerCamelCase , drop_last=__lowerCamelCase , num_processes=__lowerCamelCase , process_index=__lowerCamelCase , split_batches=__lowerCamelCase , ) for i in range(__lowerCamelCase ) ] __UpperCAmelCase : List[Any] = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. 
random.seed(__lowerCamelCase ) iterable_dataset_lists.append(list(__lowerCamelCase ) ) __UpperCAmelCase : Optional[Any] = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size __UpperCAmelCase : Tuple = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) self.assertTrue(len(__lowerCamelCase ) % shard_batch_size == 0 ) __UpperCAmelCase : List[Any] = [] for idx in range(0 , len(__lowerCamelCase ) , __lowerCamelCase ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(__lowerCamelCase ) < len(__lowerCamelCase ): reference += reference self.assertListEqual(__lowerCamelCase , reference[: len(__lowerCamelCase )] ) def _lowerCamelCase ( self: List[str] ) -> Union[str, Any]: __UpperCAmelCase : List[str] = 42 __UpperCAmelCase : int = RandomIterableDataset() self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase ) self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase ) self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase ) self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase ) # Edge case with a very small dataset __UpperCAmelCase : Optional[int] = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase ) self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase ) self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase ) self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase ) def _lowerCamelCase ( self: Optional[Any] ) -> Tuple: __UpperCAmelCase : List[str] = BatchSampler(range(16 ) , batch_size=4 , drop_last=__lowerCamelCase ) __UpperCAmelCase : str = SkipBatchSampler(__lowerCamelCase , 2 ) self.assertListEqual(list(__lowerCamelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _lowerCamelCase ( self: int ) -> int: __UpperCAmelCase : int = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _lowerCamelCase ( self: Dict ) -> str: __UpperCAmelCase : Union[str, Any] = DataLoader(list(range(16 ) ) , batch_size=4 ) __UpperCAmelCase : Any = skip_first_batches(__lowerCamelCase , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _lowerCamelCase ( self: List[Any] ) -> Any: __UpperCAmelCase : str = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(__lowerCamelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__lowerCamelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def _lowerCamelCase ( self: int ) -> 
List[str]: Accelerator() __UpperCAmelCase : List[str] = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(__lowerCamelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__lowerCamelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
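# Minimal usage sketch of the sharding behaviour exercised above, assuming accelerate's
# BatchSamplerShard with the positional (batch_sampler, num_processes, process_index)
# signature used by these tests. Without split_batches, whole batches are round-robined
# across processes.
from torch.utils.data import BatchSampler

from accelerate.data_loader import BatchSamplerShard

sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
shards = [BatchSamplerShard(sampler, 2, rank) for rank in range(2)]
for rank, shard in enumerate(shards):
    print(rank, list(shard))
# Expected: rank 0 -> [[0, 1, 2, 3], [8, 9, 10, 11]], rank 1 -> [[4, 5, 6, 7], [12, 13, 14, 15]]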
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _snake_case = { '''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''], '''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''], '''processing_whisper''': ['''WhisperProcessor'''], '''tokenization_whisper''': ['''WhisperTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['''WhisperTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''WhisperForConditionalGeneration''', '''WhisperModel''', '''WhisperPreTrainedModel''', '''WhisperForAudioClassification''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWhisperForConditionalGeneration''', '''TFWhisperModel''', '''TFWhisperPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''FlaxWhisperForConditionalGeneration''', '''FlaxWhisperModel''', '''FlaxWhisperPreTrainedModel''', '''FlaxWhisperForAudioClassification''', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
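# Minimal usage sketch, assuming the public transformers exports registered in
# _import_structure above. With _LazyModule, each submodule is imported only on first
# attribute access; the torch/TF/Flax variants require the matching extras.
from transformers import WhisperConfig, WhisperFeatureExtractor

config = WhisperConfig()               # imports configuration_whisper lazily
extractor = WhisperFeatureExtractor()  # imports feature_extraction_whisper lazily
print(config.model_type)               # -> "whisper"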
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class _snake_case ( _lowercase , _lowercase ): @register_to_config def __init__( self: int , __lowerCamelCase: int = 1_28 , __lowerCamelCase: int = 2_56 , __lowerCamelCase: float = 20_00.0 , __lowerCamelCase: int = 7_68 , __lowerCamelCase: int = 12 , __lowerCamelCase: int = 12 , __lowerCamelCase: int = 64 , __lowerCamelCase: int = 20_48 , __lowerCamelCase: float = 0.1 , ) -> int: super().__init__() __UpperCAmelCase : Optional[int] = nn.Sequential( nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , ) __UpperCAmelCase : str = nn.Embedding(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : List[str] = False __UpperCAmelCase : List[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) __UpperCAmelCase : str = nn.Dropout(p=__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = nn.ModuleList() for lyr_num in range(__lowerCamelCase ): # FiLM conditional T5 decoder __UpperCAmelCase : Union[str, Any] = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase ) self.decoders.append(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = TaLayerNorm(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = nn.Dropout(p=__lowerCamelCase ) __UpperCAmelCase : List[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: Dict , __lowerCamelCase: Optional[Any] ) -> Dict: __UpperCAmelCase : str = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict ) -> Optional[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. __UpperCAmelCase : Any = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) __UpperCAmelCase : str = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) __UpperCAmelCase : List[str] = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. __UpperCAmelCase : Any = torch.broadcast_to( torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , ) __UpperCAmelCase : Union[str, Any] = self.position_encoding(__lowerCamelCase ) __UpperCAmelCase : List[Any] = self.continuous_inputs_projection(__lowerCamelCase ) inputs += position_encodings __UpperCAmelCase : Optional[Any] = self.dropout(__lowerCamelCase ) # decoder: No padding present. __UpperCAmelCase : Tuple = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
__UpperCAmelCase : int = [(x, self.encoder_decoder_mask(__lowerCamelCase , __lowerCamelCase )) for x, y in encodings_and_masks] # cross attend style: concat encodings __UpperCAmelCase : List[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) __UpperCAmelCase : Dict = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: __UpperCAmelCase : List[str] = lyr( __lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0] __UpperCAmelCase : List[str] = self.decoder_norm(__lowerCamelCase ) __UpperCAmelCase : List[Any] = self.post_dropout(__lowerCamelCase ) __UpperCAmelCase : Tuple = self.spec_out(__lowerCamelCase ) return spec_out class _snake_case ( nn.Module ): def __init__( self: Tuple , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[Any] , __lowerCamelCase: str=1e-6 ) -> int: super().__init__() __UpperCAmelCase : Optional[int] = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: int=None , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Dict=None , __lowerCamelCase: Union[str, Any]=None , __lowerCamelCase: List[Any]=None , ) -> int: __UpperCAmelCase : int = self.layer[0]( __lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , ) if encoder_hidden_states is not None: __UpperCAmelCase : Union[str, Any] = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to( encoder_hidden_states.dtype ) __UpperCAmelCase : Tuple = self.layer[1]( __lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , ) # Apply Film Conditional Feed Forward layer __UpperCAmelCase : List[str] = self.layer[-1](__lowerCamelCase , __lowerCamelCase ) return (hidden_states,) class _snake_case ( nn.Module ): def __init__( self: List[str] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Any ) -> Optional[Any]: super().__init__() __UpperCAmelCase : Tuple = TaLayerNorm(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase ) __UpperCAmelCase : Optional[int] = nn.Dropout(__lowerCamelCase ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: str=None , __lowerCamelCase: Any=None , ) -> Dict: # pre_self_attention_layer_norm __UpperCAmelCase : Any = self.layer_norm(__lowerCamelCase ) if conditioning_emb is not None: __UpperCAmelCase : str = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase ) # Self-attention block __UpperCAmelCase : List[Any] = 
self.attention(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = hidden_states + self.dropout(__lowerCamelCase ) return hidden_states class _snake_case ( nn.Module ): def __init__( self: str , __lowerCamelCase: Dict , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: Dict ) -> Union[str, Any]: super().__init__() __UpperCAmelCase : Dict = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase ) __UpperCAmelCase : str = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase ) __UpperCAmelCase : int = nn.Dropout(__lowerCamelCase ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Union[str, Any]=None , ) -> Optional[int]: __UpperCAmelCase : int = self.layer_norm(__lowerCamelCase ) __UpperCAmelCase : Tuple = self.attention( __lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , ) __UpperCAmelCase : int = hidden_states + self.dropout(__lowerCamelCase ) return layer_output class _snake_case ( nn.Module ): def __init__( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any] ) -> Union[str, Any]: super().__init__() __UpperCAmelCase : Union[str, Any] = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase ) __UpperCAmelCase : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase ) __UpperCAmelCase : List[str] = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase ) __UpperCAmelCase : List[Any] = nn.Dropout(__lowerCamelCase ) def _lowerCamelCase ( self: int , __lowerCamelCase: Optional[int] , __lowerCamelCase: Any=None ) -> Any: __UpperCAmelCase : Tuple = self.layer_norm(__lowerCamelCase ) if conditioning_emb is not None: __UpperCAmelCase : Any = self.film(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Tuple = self.DenseReluDense(__lowerCamelCase ) __UpperCAmelCase : Dict = hidden_states + self.dropout(__lowerCamelCase ) return hidden_states class _snake_case ( nn.Module ): def __init__( self: Any , __lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: Any ) -> Dict: super().__init__() __UpperCAmelCase : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) __UpperCAmelCase : List[str] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = nn.Dropout(__lowerCamelCase ) __UpperCAmelCase : str = NewGELUActivation() def _lowerCamelCase ( self: Any , __lowerCamelCase: Union[str, Any] ) -> int: __UpperCAmelCase : Optional[int] = self.act(self.wi_a(__lowerCamelCase ) ) __UpperCAmelCase : Any = self.wi_a(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = hidden_gelu * hidden_linear __UpperCAmelCase : Any = self.dropout(__lowerCamelCase ) __UpperCAmelCase : Any = self.wo(__lowerCamelCase ) return hidden_states class _snake_case ( nn.Module ): def __init__( self: Union[str, Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any]=1e-6 ) -> Optional[int]: super().__init__() __UpperCAmelCase : Tuple = nn.Parameter(torch.ones(__lowerCamelCase ) ) __UpperCAmelCase : Optional[Any] = eps def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Optional[Any] ) -> List[str]: 
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 __UpperCAmelCase : int = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase ) __UpperCAmelCase : Optional[int] = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: __UpperCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class _snake_case ( nn.Module ): def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: torch.Tensor ) -> torch.Tensor: return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(__lowerCamelCase , 3.0 )) )) class _snake_case ( nn.Module ): def __init__( self: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: Any ) -> Optional[int]: super().__init__() __UpperCAmelCase : Optional[int] = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase ) def _lowerCamelCase ( self: int , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple ) -> Dict: __UpperCAmelCase : int = self.scale_bias(__lowerCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.chunk(__lowerCamelCase , 2 , -1 ) __UpperCAmelCase : List[str] = x * (1 + scale) + shift return x
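# Minimal sketch of the FiLM modulation used throughout the decoder above (the
# TaFiLMLayer pattern): one bias-free linear layer maps the conditioning embedding to
# (scale, shift), and the features are transformed as x * (1 + scale) + shift. Shapes
# below are illustrative.
import torch
from torch import nn


class FiLM(nn.Module):
    def __init__(self, cond_features: int, out_features: int):
        super().__init__()
        # single projection to (scale, shift), no bias, matching the layer above
        self.scale_bias = nn.Linear(cond_features, out_features * 2, bias=False)

    def forward(self, x: torch.Tensor, conditioning: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.scale_bias(conditioning), 2, dim=-1)
        return x * (1 + scale) + shift


film = FiLM(cond_features=512, out_features=128)
x = torch.randn(2, 10, 128)
cond = torch.randn(2, 1, 512)  # broadcasts over the sequence dimension
print(film(x, cond).shape)     # torch.Size([2, 10, 128])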
from __future__ import annotations

from math import pi


def _UpperCamelCase(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
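# Usage sketch for the helper above: exactly one argument must be 0, and the missing
# quantity of X_L = 2 * pi * f * L is solved for. With L = 35 mH and f = 1000 Hz the
# inductive reactance is about 219.9 ohms.
print(_UpperCamelCase(35e-3, 1000, 0))    # {'reactance': 219.91...}
print(_UpperCamelCase(0, 1000, 219.911))  # {'inductance': ~0.035}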
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger _snake_case = get_logger(__name__) _snake_case = r''' Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. ''' class _snake_case : @add_start_docstrings(__lowerCamelCase ) def __call__( self: Union[str, Any] , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: jnp.ndarray ) -> jnp.ndarray: raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class _snake_case : @add_start_docstrings(__lowerCamelCase ) def __call__( self: Optional[Any] , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: jnp.ndarray ) -> jnp.ndarray: raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class _snake_case ( _lowercase ): @add_start_docstrings(__lowerCamelCase ) def __call__( self: Dict , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: int , **__lowerCamelCase: List[Any] ) -> jnp.ndarray: for processor in self: __UpperCAmelCase : str = inspect.signature(processor.__call__ ).parameters if len(__lowerCamelCase ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' f'''{processor.__class__} are passed to the logits processor.''' ) __UpperCAmelCase : int = processor(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) else: __UpperCAmelCase : Union[str, Any] = processor(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return scores class _snake_case ( _lowercase ): def __init__( self: int , __lowerCamelCase: float ) -> Tuple: if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not (temperature > 0): raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' ) __UpperCAmelCase : Any = temperature def __call__( self: List[str] , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: int ) -> jnp.ndarray: __UpperCAmelCase : List[str] = scores / self.temperature return scores class _snake_case ( _lowercase ): def __init__( self: str , __lowerCamelCase: float , __lowerCamelCase: float = -float("Inf" ) , __lowerCamelCase: int = 1 ) -> List[str]: if not isinstance(__lowerCamelCase , __lowerCamelCase ) or (top_p < 0 or top_p > 1.0): raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(__lowerCamelCase , __lowerCamelCase ) or (min_tokens_to_keep < 1): raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) __UpperCAmelCase : Optional[Any] = top_p __UpperCAmelCase : str = filter_value __UpperCAmelCase : 
List[Any] = min_tokens_to_keep def __call__( self: Dict , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: int ) -> jnp.ndarray: __UpperCAmelCase , __UpperCAmelCase : List[Any] = lax.top_k(__lowerCamelCase , scores.shape[-1] ) __UpperCAmelCase : Any = jnp.full_like(__lowerCamelCase , self.filter_value ) __UpperCAmelCase : Any = jax.nn.softmax(__lowerCamelCase , axis=-1 ).cumsum(axis=-1 ) __UpperCAmelCase : List[str] = cumulative_probs < self.top_p # include the token that is higher than top_p as well __UpperCAmelCase : List[Any] = jnp.roll(__lowerCamelCase , 1 ) score_mask |= score_mask.at[:, 0].set(__lowerCamelCase ) # min tokens to keep __UpperCAmelCase : int = score_mask.at[:, : self.min_tokens_to_keep].set(__lowerCamelCase ) __UpperCAmelCase : int = jnp.where(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = jax.lax.sort_key_val(__lowerCamelCase , __lowerCamelCase )[-1] return next_scores class _snake_case ( _lowercase ): def __init__( self: str , __lowerCamelCase: int , __lowerCamelCase: float = -float("Inf" ) , __lowerCamelCase: int = 1 ) -> Optional[int]: if not isinstance(__lowerCamelCase , __lowerCamelCase ) or top_k <= 0: raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) __UpperCAmelCase : List[str] = max(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : List[Any] = filter_value def __call__( self: int , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: int ) -> jnp.ndarray: __UpperCAmelCase , __UpperCAmelCase : List[Any] = scores.shape __UpperCAmelCase : Dict = jnp.full(batch_size * vocab_size , self.filter_value ) __UpperCAmelCase : Tuple = min(self.top_k , scores.shape[-1] ) # Safety check __UpperCAmelCase , __UpperCAmelCase : Dict = lax.top_k(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to((jnp.arange(__lowerCamelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() __UpperCAmelCase : Union[str, Any] = topk_scores.flatten() __UpperCAmelCase : Dict = topk_indices.flatten() + shift __UpperCAmelCase : List[Any] = next_scores_flat.at[topk_indices_flat].set(__lowerCamelCase ) __UpperCAmelCase : Any = next_scores_flat.reshape(__lowerCamelCase , __lowerCamelCase ) return next_scores class _snake_case ( _lowercase ): def __init__( self: Any , __lowerCamelCase: int ) -> Tuple: __UpperCAmelCase : Optional[int] = bos_token_id def __call__( self: Dict , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: int ) -> jnp.ndarray: __UpperCAmelCase : str = jnp.full(scores.shape , -float("inf" ) ) __UpperCAmelCase : str = 1 - jnp.bool_(cur_len - 1 ) __UpperCAmelCase : Dict = jnp.where(__lowerCamelCase , new_scores.at[:, self.bos_token_id].set(0 ) , __lowerCamelCase ) return scores class _snake_case ( _lowercase ): def __init__( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> Optional[int]: __UpperCAmelCase : List[str] = max_length __UpperCAmelCase : Union[str, Any] = eos_token_id def __call__( self: Optional[Any] , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: int ) -> jnp.ndarray: __UpperCAmelCase : Tuple = jnp.full(scores.shape , -float("inf" ) ) __UpperCAmelCase : Any = 1 - jnp.bool_(cur_len - self.max_length + 1 ) __UpperCAmelCase : str = jnp.where(__lowerCamelCase , new_scores.at[:, self.eos_token_id].set(0 ) , __lowerCamelCase ) return scores class _snake_case ( _lowercase ): def __init__( self: Tuple , 
__lowerCamelCase: int , __lowerCamelCase: int ) -> str: if not isinstance(__lowerCamelCase , __lowerCamelCase ) or min_length < 0: raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(__lowerCamelCase , __lowerCamelCase ) or eos_token_id < 0: raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) __UpperCAmelCase : List[str] = min_length __UpperCAmelCase : int = eos_token_id def __call__( self: Any , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: int ) -> jnp.ndarray: # create boolean flag to decide if min length penalty should be applied __UpperCAmelCase : List[Any] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) __UpperCAmelCase : Union[str, Any] = jnp.where(__lowerCamelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , __lowerCamelCase ) return scores class _snake_case ( _lowercase ): def __init__( self: Union[str, Any] , __lowerCamelCase: int , __lowerCamelCase: List[str] ) -> List[Any]: __UpperCAmelCase : Optional[Any] = list(__lowerCamelCase ) __UpperCAmelCase : Any = begin_index def __call__( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: int ) -> List[Any]: __UpperCAmelCase : List[Any] = 1 - jnp.bool_(cur_len - self.begin_index ) __UpperCAmelCase : Union[str, Any] = jnp.where(__lowerCamelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , __lowerCamelCase ) return scores class _snake_case ( _lowercase ): def __init__( self: List[str] , __lowerCamelCase: list ) -> List[str]: __UpperCAmelCase : Union[str, Any] = list(__lowerCamelCase ) def __call__( self: Union[str, Any] , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: int ) -> jnp.ndarray: __UpperCAmelCase : Optional[Any] = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class _snake_case ( _lowercase ): def __init__( self: Tuple , __lowerCamelCase: Optional[Any] ) -> List[str]: __UpperCAmelCase : Tuple = dict(__lowerCamelCase ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
__UpperCAmelCase : Tuple = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: __UpperCAmelCase : List[Any] = force_token_array.at[index].set(__lowerCamelCase ) __UpperCAmelCase : Tuple = jnp.intaa(__lowerCamelCase ) def __call__( self: int , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: jnp.ndarray , __lowerCamelCase: int ) -> jnp.ndarray: def _force_token(__lowerCamelCase: Optional[int] ): __UpperCAmelCase : Optional[Any] = scores.shape[0] __UpperCAmelCase : List[str] = self.force_token_array[generation_idx] __UpperCAmelCase : List[Any] = jnp.ones_like(__lowerCamelCase , dtype=scores.dtype ) * -float("inf" ) __UpperCAmelCase : List[Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) __UpperCAmelCase : Tuple = lax.dynamic_update_slice(__lowerCamelCase , __lowerCamelCase , (0, current_token) ) return new_scores __UpperCAmelCase : str = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(__lowerCamelCase ) , lambda: scores , ) , ) return scores class _snake_case ( _lowercase ): def __init__( self: Dict , __lowerCamelCase: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Union[str, Any] ) -> str: __UpperCAmelCase : str = generate_config.eos_token_id __UpperCAmelCase : Any = generate_config.no_timestamps_token_id __UpperCAmelCase : List[str] = generate_config.no_timestamps_token_id + 1 __UpperCAmelCase : Tuple = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(__lowerCamelCase , "max_initial_timestamp_index" ): __UpperCAmelCase : Any = generate_config.max_initial_timestamp_index else: __UpperCAmelCase : Any = model_config.vocab_size if self.max_initial_timestamp_index is None: __UpperCAmelCase : int = model_config.vocab_size def __call__( self: Optional[int] , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any] ) -> str: # suppress <|notimestamps|> which is handled by without_timestamps __UpperCAmelCase : Optional[Any] = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(__lowerCamelCase: Tuple , __lowerCamelCase: List[str] ): __UpperCAmelCase : Tuple = jnp.where((cur_len - self.begin_index) >= 1 , __lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : str = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __lowerCamelCase , ) __UpperCAmelCase : Union[str, Any] = jnp.where((cur_len - self.begin_index) < 2 , __lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Any = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , __lowerCamelCase , __lowerCamelCase , ) return jnp.where( __lowerCamelCase , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , __lowerCamelCase , ) __UpperCAmelCase : Dict = jax.vmap(__lowerCamelCase )(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : str = jnp.where(cur_len == self.begin_index , __lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : str = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __lowerCamelCase , ) __UpperCAmelCase : str = self.timestamp_begin + self.max_initial_timestamp_index __UpperCAmelCase : List[str] = jnp.where( __lowerCamelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" 
) ) , __lowerCamelCase , ) # if sum of probability over timestamps is above any other token, sample timestamp __UpperCAmelCase : Dict = jax.nn.log_softmax(__lowerCamelCase , axis=-1 ) def handle_cumulative_probs(__lowerCamelCase: Union[str, Any] , __lowerCamelCase: Tuple ): __UpperCAmelCase : int = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) __UpperCAmelCase : int = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , __lowerCamelCase , ) __UpperCAmelCase : Optional[int] = jax.vmap(__lowerCamelCase )(__lowerCamelCase , __lowerCamelCase ) return scores
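# Minimal usage sketch, assuming the public transformers names behind the obfuscated
# classes above (FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper,
# FlaxTopKLogitsWarper). Temperature rescales the scores first; top-k then pushes
# everything outside the k best tokens to -inf.
import jax.numpy as jnp

from transformers import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=3)]
)
input_ids = jnp.zeros((1, 4), dtype=jnp.int32)
scores = jnp.array([[0.1, 0.5, 0.2, -1.0, 3.0, 0.0]])
print(processors(input_ids, scores, cur_len=4))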
import flax.linen as nn import jax import jax.numpy as jnp class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]: __UpperCAmelCase : List[str] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape __UpperCAmelCase : Dict = jax.image.resize( __lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , ) __UpperCAmelCase : Dict = self.conv(__lowerCamelCase ) return hidden_states class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: str ) -> Any: __UpperCAmelCase : Optional[int] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) __UpperCAmelCase : Any = self.conv(__lowerCamelCase ) return hidden_states class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: int = None lowerCamelCase__: float = 0.0 lowerCamelCase__: bool = None lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: str ) -> List[str]: __UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels __UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) __UpperCAmelCase : List[str] = nn.Conv( __lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype ) __UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) __UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob ) __UpperCAmelCase : Tuple = nn.Conv( __lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __UpperCAmelCase : List[Any] = None if use_nin_shortcut: __UpperCAmelCase : Dict = nn.Conv( __lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , ) def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]: __UpperCAmelCase : Dict = hidden_states __UpperCAmelCase : int = self.norma(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase ) __UpperCAmelCase : Tuple = self.conva(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) ) __UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 ) __UpperCAmelCase : List[str] = hidden_states + temb __UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase ) __UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase ) __UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : List[str] = self.conva(__lowerCamelCase ) if self.conv_shortcut is not None: __UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase ) return hidden_states + residual
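# Minimal shape check for the nearest-neighbour upsample block above: jax.image.resize
# doubles height and width in NHWC layout before the 3x3 convolution is applied.
import jax
import jax.numpy as jnp

x = jnp.ones((1, 8, 8, 4))  # (batch, height, width, channels)
up = jax.image.resize(x, shape=(1, 16, 16, 4), method="nearest")
print(up.shape)  # (1, 16, 16, 4)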
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''microsoft/trocr-base-handwritten''': ( '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json''' ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class _snake_case ( _lowercase ): lowerCamelCase__: Dict = "trocr" lowerCamelCase__: List[str] = ["past_key_values"] lowerCamelCase__: Any = { "num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model", "num_hidden_layers": "decoder_layers", } def __init__( self: Tuple , __lowerCamelCase: List[str]=5_02_65 , __lowerCamelCase: List[str]=10_24 , __lowerCamelCase: List[str]=12 , __lowerCamelCase: Optional[int]=16 , __lowerCamelCase: Optional[Any]=40_96 , __lowerCamelCase: Any="gelu" , __lowerCamelCase: Optional[int]=5_12 , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: List[str]=0.0 , __lowerCamelCase: List[str]=0.0 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: List[Any]=0.02 , __lowerCamelCase: int=0.0 , __lowerCamelCase: List[str]=True , __lowerCamelCase: Union[str, Any]=False , __lowerCamelCase: List[Any]=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=1 , __lowerCamelCase: Tuple=0 , __lowerCamelCase: Any=2 , **__lowerCamelCase: Dict , ) -> Any: __UpperCAmelCase : int = vocab_size __UpperCAmelCase : int = d_model __UpperCAmelCase : Tuple = decoder_layers __UpperCAmelCase : Union[str, Any] = decoder_attention_heads __UpperCAmelCase : Optional[Any] = decoder_ffn_dim __UpperCAmelCase : Optional[int] = activation_function __UpperCAmelCase : int = max_position_embeddings __UpperCAmelCase : str = dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : Union[str, Any] = activation_dropout __UpperCAmelCase : List[str] = init_std __UpperCAmelCase : Optional[Any] = decoder_layerdrop __UpperCAmelCase : List[Any] = use_cache __UpperCAmelCase : Union[str, Any] = scale_embedding __UpperCAmelCase : str = use_learned_position_embeddings __UpperCAmelCase : Tuple = layernorm_embedding super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , **__lowerCamelCase , )
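# Minimal usage sketch, assuming this is transformers' TrOCRConfig. The attribute_map
# above routes the generic config names to the decoder-specific fields.
from transformers import TrOCRConfig

config = TrOCRConfig(d_model=256, decoder_attention_heads=8)
print(config.model_type)           # -> "trocr"
print(config.hidden_size)          # -> 256, aliased to d_model
print(config.num_attention_heads)  # -> 8, aliased to decoder_attention_heads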
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss _snake_case = pytest.mark.integration @require_faiss class _snake_case ( _lowercase ): def _lowerCamelCase ( self: Union[str, Any] ) -> str: __UpperCAmelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(__lowerCamelCase ) for x in np.arange(30 ).tolist()]} ) return dset def _lowerCamelCase ( self: Optional[Any] ) -> Tuple: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() __UpperCAmelCase : int = dset.map( lambda __lowerCamelCase , __lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase ) __UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT ) __UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) dset.drop_index("vecs" ) def _lowerCamelCase ( self: List[str] ) -> int: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def _lowerCamelCase ( self: Optional[int] ) -> Dict: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file: dset.save_faiss_index("vecs" , tmp_file.name ) dset.load_faiss_index("vecs2" , tmp_file.name ) os.unlink(tmp_file.name ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def _lowerCamelCase ( self: List[Any] ) -> List[Any]: __UpperCAmelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" ) dset.drop_index("vecs" ) self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) ) def _lowerCamelCase ( self: List[str] ) -> Dict: from elasticsearch import Elasticsearch __UpperCAmelCase : Dataset = self._create_dummy_dataset() with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: __UpperCAmelCase : int = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30 ) __UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} __UpperCAmelCase : Any = Elasticsearch() dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) @require_faiss class _snake_case ( _lowercase ): def _lowerCamelCase ( self: List[str] ) -> Optional[int]: import faiss __UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa ) __UpperCAmelCase : List[str] = 1 __UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase ) self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1] __UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase ) self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] ) __UpperCAmelCase : Dict = [scores[0] for scores in total_scores] __UpperCAmelCase : int = [indices[0] for indices in total_indices] self.assertGreater(np.min(__lowerCamelCase ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase ) def _lowerCamelCase ( self: Any ) -> List[str]: import faiss __UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(__lowerCamelCase ): __UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) ) def 
_lowerCamelCase ( self: List[str] ) -> Dict: import faiss __UpperCAmelCase : str = faiss.IndexFlat(5 ) __UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def _lowerCamelCase ( self: Union[str, Any] ) -> int: import faiss __UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file: index.save(tmp_file.name ) __UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa ) __UpperCAmelCase : Tuple = 1 __UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def _UpperCamelCase ( snake_case__ ) -> Optional[Any]: import faiss __UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5, dtype=np.floataa ) ) __UpperCAmelCase : Optional[Any] = "index.faiss" __UpperCAmelCase : Optional[int] = f'''mock://{index_name}''' index.save(snake_case__, storage_options=mockfs.storage_options ) __UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options ) __UpperCAmelCase : str = np.zeros(5, dtype=np.floataa ) __UpperCAmelCase : Any = 1 __UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _snake_case ( _lowercase ): def _lowerCamelCase ( self: str ) -> Union[str, Any]: from elasticsearch import Elasticsearch with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: __UpperCAmelCase : Optional[Any] = Elasticsearch() __UpperCAmelCase : Dict = {"acknowledged": True} __UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(["foo", "bar", "foobar"] ) # single query __UpperCAmelCase : Dict = "foo" __UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} __UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __UpperCAmelCase : int = "foo" __UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __UpperCAmelCase : int = ["foo", "bar", "foobar"] __UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} __UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase ) __UpperCAmelCase : Tuple = [scores[0] for scores in total_scores] __UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices] 
self.assertGreater(np.min(__lowerCamelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __lowerCamelCase ) # batched queries with timeout __UpperCAmelCase : str = ["foo", "bar", "foobar"] __UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 ) __UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores] __UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(__lowerCamelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __lowerCamelCase )
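# Minimal end-to-end sketch of the flow the tests above mock out, assuming faiss is
# installed; by default add_faiss_index builds a flat (exact) index over the column.
import numpy as np

from datasets import Dataset

ds = Dataset.from_dict({"filename": [f"doc_{i}" for i in range(8)]})
ds = ds.map(lambda ex, i: {"vecs": i * np.ones(4, dtype=np.float32)}, with_indices=True)
ds.add_faiss_index(column="vecs")
scores, examples = ds.get_nearest_examples("vecs", 7 * np.ones(4, dtype=np.float32), k=1)
print(examples["filename"])  # closest row to the query vector -> ['doc_7']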
def _UpperCamelCase(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
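# Usage sketch for the proper-divisor sum above: a positive integer is perfect exactly
# when it equals this sum.
print(_UpperCamelCase(6))   # 1 + 2 + 3 = 6           -> perfect
print(_UpperCamelCase(28))  # 1 + 2 + 4 + 7 + 14 = 28 -> perfect
print(_UpperCamelCase(12))  # 1 + 2 + 3 + 4 + 6 = 16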
import argparse import struct import unittest class _snake_case : def __init__( self: Tuple , __lowerCamelCase: bytes ) -> None: __UpperCAmelCase : Tuple = data # Initialize hash values __UpperCAmelCase : Any = [ 0x6_A_0_9_E_6_6_7, 0xB_B_6_7_A_E_8_5, 0x3_C_6_E_F_3_7_2, 0xA_5_4_F_F_5_3_A, 0x5_1_0_E_5_2_7_F, 0x9_B_0_5_6_8_8_C, 0x1_F_8_3_D_9_A_B, 0x5_B_E_0_C_D_1_9, ] # Initialize round constants __UpperCAmelCase : Dict = [ 0x4_2_8_A_2_F_9_8, 0x7_1_3_7_4_4_9_1, 0xB_5_C_0_F_B_C_F, 0xE_9_B_5_D_B_A_5, 0x3_9_5_6_C_2_5_B, 0x5_9_F_1_1_1_F_1, 0x9_2_3_F_8_2_A_4, 0xA_B_1_C_5_E_D_5, 0xD_8_0_7_A_A_9_8, 0x1_2_8_3_5_B_0_1, 0x2_4_3_1_8_5_B_E, 0x5_5_0_C_7_D_C_3, 0x7_2_B_E_5_D_7_4, 0x8_0_D_E_B_1_F_E, 0x9_B_D_C_0_6_A_7, 0xC_1_9_B_F_1_7_4, 0xE_4_9_B_6_9_C_1, 0xE_F_B_E_4_7_8_6, 0x0_F_C_1_9_D_C_6, 0x2_4_0_C_A_1_C_C, 0x2_D_E_9_2_C_6_F, 0x4_A_7_4_8_4_A_A, 0x5_C_B_0_A_9_D_C, 0x7_6_F_9_8_8_D_A, 0x9_8_3_E_5_1_5_2, 0xA_8_3_1_C_6_6_D, 0xB_0_0_3_2_7_C_8, 0xB_F_5_9_7_F_C_7, 0xC_6_E_0_0_B_F_3, 0xD_5_A_7_9_1_4_7, 0x0_6_C_A_6_3_5_1, 0x1_4_2_9_2_9_6_7, 0x2_7_B_7_0_A_8_5, 0x2_E_1_B_2_1_3_8, 0x4_D_2_C_6_D_F_C, 0x5_3_3_8_0_D_1_3, 0x6_5_0_A_7_3_5_4, 0x7_6_6_A_0_A_B_B, 0x8_1_C_2_C_9_2_E, 0x9_2_7_2_2_C_8_5, 0xA_2_B_F_E_8_A_1, 0xA_8_1_A_6_6_4_B, 0xC_2_4_B_8_B_7_0, 0xC_7_6_C_5_1_A_3, 0xD_1_9_2_E_8_1_9, 0xD_6_9_9_0_6_2_4, 0xF_4_0_E_3_5_8_5, 0x1_0_6_A_A_0_7_0, 0x1_9_A_4_C_1_1_6, 0x1_E_3_7_6_C_0_8, 0x2_7_4_8_7_7_4_C, 0x3_4_B_0_B_C_B_5, 0x3_9_1_C_0_C_B_3, 0x4_E_D_8_A_A_4_A, 0x5_B_9_C_C_A_4_F, 0x6_8_2_E_6_F_F_3, 0x7_4_8_F_8_2_E_E, 0x7_8_A_5_6_3_6_F, 0x8_4_C_8_7_8_1_4, 0x8_C_C_7_0_2_0_8, 0x9_0_B_E_F_F_F_A, 0xA_4_5_0_6_C_E_B, 0xB_E_F_9_A_3_F_7, 0xC_6_7_1_7_8_F_2, ] __UpperCAmelCase : List[Any] = self.preprocessing(self.data ) self.final_hash() @staticmethod def _lowerCamelCase ( __lowerCamelCase: bytes ) -> bytes: __UpperCAmelCase : List[str] = B"\x80" + (B"\x00" * (63 - (len(__lowerCamelCase ) + 8) % 64)) __UpperCAmelCase : int = struct.pack(">Q" , (len(__lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def _lowerCamelCase ( self: Dict ) -> None: # Convert into blocks of 64 bytes __UpperCAmelCase : Dict = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __UpperCAmelCase : List[str] = list(struct.unpack(">16L" , __lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array __UpperCAmelCase : Union[str, Any] = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) __UpperCAmelCase : str = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) __UpperCAmelCase : Union[str, Any] = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_0_0_0_0_0_0_0_0 # Compression __UpperCAmelCase : Union[str, Any] = self.ror(__lowerCamelCase , 6 ) ^ self.ror(__lowerCamelCase , 11 ) ^ self.ror(__lowerCamelCase , 25 ) __UpperCAmelCase : Tuple = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g) __UpperCAmelCase : int = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_0_0_0_0_0_0_0_0 __UpperCAmelCase : List[Any] = self.ror(__lowerCamelCase , 2 ) ^ self.ror(__lowerCamelCase , 13 ) ^ self.ror(__lowerCamelCase , 22 ) __UpperCAmelCase : Dict = (a & b) ^ (a & c) 
^ (b & c) __UpperCAmelCase : int = (sa + maj) % 0x1_0_0_0_0_0_0_0_0 __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = ( g, f, e, ((d + tempa) % 0x1_0_0_0_0_0_0_0_0), c, b, a, ((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0), ) __UpperCAmelCase : Optional[int] = [a, b, c, d, e, f, g, h] # Modify final values __UpperCAmelCase : List[str] = [ ((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0) for index, element in enumerate(self.hashes ) ] __UpperCAmelCase : int = "".join([hex(__lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> int: return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations) class _snake_case ( unittest.TestCase ): def _lowerCamelCase ( self: List[Any] ) -> None: import hashlib __UpperCAmelCase : Dict = bytes("Test String" , "utf-8" ) self.assertEqual(SHAaaa(__lowerCamelCase ).hash , hashlib.shaaaa(__lowerCamelCase ).hexdigest() ) def _UpperCamelCase ( ) -> None: import doctest doctest.testmod() __UpperCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument( "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", ) parser.add_argument( "-f", "--file", dest="input_file", help="Hash contents of a file" ) __UpperCAmelCase : List[Any] = parser.parse_args() __UpperCAmelCase : Optional[int] = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file, "rb" ) as f: __UpperCAmelCase : List[str] = f.read() else: __UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" ) print(SHAaaa(snake_case__ ).hash ) if __name__ == "__main__": main()
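# Verification sketch for the SHA-256 implementation above (class SHAaaa). Note the
# listing's name obfuscation collapses the two distinct compression temporaries into a
# single tempa and erases assignment targets; with those restored, the digest matches
# hashlib, as the unittest above asserts. "abc" is the classic FIPS 180 test vector.
import hashlib

data = b"abc"
print(hashlib.sha256(data).hexdigest())
# -> ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad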
import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging _snake_case = logging.get_logger(__name__) def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=False ) -> List[str]: try: import torch # noqa: F401 except ImportError: logger.error( "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" " instructions." ) raise if not is_sharded: __UpperCAmelCase : Tuple = os.path.abspath(snake_case__ ) logger.info(f'''Loading PyTorch weights from {pt_path}''' ) __UpperCAmelCase : Tuple = torch.load(snake_case__, map_location="cpu" ) logger.info(f'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) __UpperCAmelCase : int = convert_pytorch_state_dict_to_flax(snake_case__, snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files __UpperCAmelCase : List[Any] = convert_pytorch_sharded_state_dict_to_flax(snake_case__, snake_case__ ) return flax_state_dict def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, ) -> (Tuple[str], np.ndarray): def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm __UpperCAmelCase : Tuple = pt_tuple_key[:-1] + ("scale",) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean __UpperCAmelCase : int = pt_tuple_key[:-1] + ("mean",) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var __UpperCAmelCase : Dict = pt_tuple_key[:-1] + ("var",) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding __UpperCAmelCase : int = pt_tuple_key[:-1] + ("embedding",) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer __UpperCAmelCase : Any = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): __UpperCAmelCase : Any = pt_tensor.transpose(2, 3, 1, 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer __UpperCAmelCase : Any = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): __UpperCAmelCase : Optional[int] = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight __UpperCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias __UpperCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 __UpperCAmelCase : Tuple = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): __UpperCAmelCase : Union[str, Any] = pt_tuple_key[-2] + "_g" elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): 
__UpperCAmelCase : int = pt_tuple_key[-2] + "_v" if name is not None: __UpperCAmelCase : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _UpperCamelCase ( snake_case__, snake_case__ ) -> List[Any]: # convert pytorch tensor to numpy __UpperCAmelCase : Union[str, Any] = {k: v.numpy() for k, v in pt_state_dict.items()} __UpperCAmelCase : Union[str, Any] = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: __UpperCAmelCase : Optional[Any] = flax_model.params["params"] else: __UpperCAmelCase : Optional[int] = flax_model.params __UpperCAmelCase : List[str] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: __UpperCAmelCase : Any = flatten_dict(flax_model.params["batch_stats"] ) random_flax_state_dict.update(snake_case__ ) __UpperCAmelCase : Tuple = {} __UpperCAmelCase : Union[str, Any] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()} ) __UpperCAmelCase : Tuple = (model_prefix in flax_model_params) and ( model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): __UpperCAmelCase : Dict = tuple(pt_key.split("." ) ) # remove base model prefix if necessary __UpperCAmelCase : str = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: __UpperCAmelCase : Tuple = pt_tuple_key[1:] # Correctly rename weight parameters __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = rename_key_and_reshape_tensor( snake_case__, snake_case__, snake_case__, snake_case__ ) # add model prefix if necessary __UpperCAmelCase : Dict = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: __UpperCAmelCase : Tuple = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: __UpperCAmelCase : Union[str, Any] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__, snake_case__ ) continue # also add unexpected weight so that warning is thrown __UpperCAmelCase : str = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown __UpperCAmelCase : Union[str, Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def _UpperCamelCase ( snake_case__, snake_case__ ) -> Tuple: import torch # Load the index __UpperCAmelCase : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils __UpperCAmelCase : str = torch.load(snake_case__ ) __UpperCAmelCase : List[str] = {k: v.numpy() for k, v in pt_state_dict.items()} __UpperCAmelCase : Optional[Any] = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: __UpperCAmelCase : Optional[Any] = flax_model.params["params"] __UpperCAmelCase : Tuple = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) ) else: __UpperCAmelCase : int = flax_model.params __UpperCAmelCase : Optional[int] = flatten_dict(snake_case__ ) __UpperCAmelCase : Union[str, Any] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()} ) __UpperCAmelCase : str = (model_prefix in flax_model_params) and ( model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): __UpperCAmelCase : Any = tuple(pt_key.split("." ) ) # remove base model prefix if necessary __UpperCAmelCase : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: __UpperCAmelCase : int = pt_tuple_key[1:] # Correctly rename weight parameters __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = rename_key_and_reshape_tensor( snake_case__, snake_case__, snake_case__, snake_case__ ) # add model prefix if necessary __UpperCAmelCase : Optional[int] = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: __UpperCAmelCase : str = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: __UpperCAmelCase : Dict = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: __UpperCAmelCase : int = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__, snake_case__ ) continue # also add unexpected weight so that warning is thrown __UpperCAmelCase : str = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown __UpperCAmelCase : List[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def _UpperCamelCase ( snake_case__, snake_case__ ) -> Dict: __UpperCAmelCase : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(f'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class __UpperCAmelCase : str = getattr(snake_case__, "Flax" + model.__class__.__name__ ) # load flax weight dict with open(snake_case__, "rb" ) as state_f: try: __UpperCAmelCase : Optional[int] = from_bytes(snake_case__, state_f.read() ) except UnpicklingError: raise EnvironmentError(f'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' ) return load_flax_weights_in_pytorch_model(snake_case__, snake_case__ ) def _UpperCamelCase ( snake_case__, snake_case__ ) -> Optional[int]: try: import torch # noqa: F401 except ImportError: logger.error( "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" " instructions." ) raise # check if we have bf16 weights __UpperCAmelCase : Union[str, Any] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa, snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` " "before loading those in PyTorch model." ) __UpperCAmelCase : str = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params, snake_case__ ) __UpperCAmelCase : Union[str, Any] = flatten_dict(snake_case__ ) __UpperCAmelCase : int = pt_model.state_dict() __UpperCAmelCase : str = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()} ) __UpperCAmelCase : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split("." 
)[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : List[Any] = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): __UpperCAmelCase : str = flax_key_tuple[0] == pt_model.base_model_prefix __UpperCAmelCase : List[str] = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: __UpperCAmelCase : Union[str, Any] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: __UpperCAmelCase : int = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer __UpperCAmelCase : Any = flax_key_tuple[:-1] + ("weight",) __UpperCAmelCase : Optional[Any] = jnp.transpose(snake_case__, (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer __UpperCAmelCase : str = flax_key_tuple[:-1] + ("weight",) __UpperCAmelCase : Dict = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: __UpperCAmelCase : str = flax_key_tuple[:-1] + ("weight",) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: __UpperCAmelCase : Any = flax_key_tuple[:-1] + ("running_mean",) elif "var" in flax_key_tuple[-1]: __UpperCAmelCase : Any = flax_key_tuple[:-1] + ("running_var",) if "batch_stats" in flax_state: __UpperCAmelCase : List[str] = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: __UpperCAmelCase : str = ".".join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. __UpperCAmelCase : str = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: __UpperCAmelCase : str = key.split("." ) __UpperCAmelCase : Tuple = None if key_components[-3::2] == ["parametrizations", "original0"]: __UpperCAmelCase : List[str] = key_components[-2] + "_g" elif key_components[-3::2] == ["parametrizations", "original1"]: __UpperCAmelCase : Optional[Any] = key_components[-2] + "_v" if name is not None: __UpperCAmelCase : Optional[Any] = key_components[:-3] + [name] __UpperCAmelCase : Union[str, Any] = ".".join(snake_case__ ) __UpperCAmelCase : Any = key if flax_key in special_pt_names: __UpperCAmelCase : Optional[int] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'''Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected ''' f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict __UpperCAmelCase : Optional[Any] = np.asarray(snake_case__ ) if not isinstance(snake_case__, np.ndarray ) else flax_tensor __UpperCAmelCase : Tuple = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list __UpperCAmelCase : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( "Some weights of the Flax model were not used when initializing the PyTorch model" f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This" f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a" " FlaxBertForSequenceClassification model)." ) else: logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(snake_case__ ) > 0: logger.warning( f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' " use it for predictions and inference." ) else: logger.warning( f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' "If your task is similar to the task the model of the checkpoint was trained on, " f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
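# Hedged example (not part of the original module): a minimal, self-contained
# numpy sketch of the two tensor-layout conventions the renaming logic above
# relies on. PyTorch Linear weights (out, in) become transposed Flax "kernel"
# entries (in, out), and 4D Conv weights go from (out, in, kh, kw) to
# (kh, kw, in, out) via transpose(2, 3, 1, 0). All shapes are illustrative.
if __name__ == "__main__":
    pt_linear = np.zeros((8, 4))  # nn.Linear.weight: (out_features, in_features)
    assert pt_linear.T.shape == (4, 8)  # flax.linen.Dense kernel: (in, out)

    pt_conv = np.zeros((16, 3, 5, 5))  # nn.Conv2d.weight: (out, in, kh, kw)
    assert pt_conv.transpose(2, 3, 1, 0).shape == (5, 5, 3, 16)  # flax Conv kernel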
import numpy as np import datasets _snake_case = ''' Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] ''' _snake_case = '''\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } ''' _snake_case = ''' Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric("mahalanobis") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {\'mahalanobis\': array([0.5])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _snake_case ( datasets.Metric ): def _lowerCamelCase ( self: List[str] ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]: # convert to numpy arrays __UpperCAmelCase : int = np.array(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction __UpperCAmelCase : str = X - np.mean(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = np.cov(reference_distribution.T ) try: __UpperCAmelCase : int = np.linalg.inv(__lowerCamelCase ) except np.linalg.LinAlgError: __UpperCAmelCase : Optional[int] = np.linalg.pinv(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = np.dot(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
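# Hedged example (not part of the original metric): a standalone numpy
# re-derivation of the computation in `_compute` above, using the toy data from
# the docstring. As in the metric, the mean is taken over *all* entries of the
# reference distribution, and the pseudo-inverse is used when the covariance
# matrix is singular (as it is for this two-point reference set).
if __name__ == "__main__":
    X = np.array([[0.0, 1.0]])
    reference = np.array([[0.0, 1.0], [1.0, 0.0]])
    delta = X - np.mean(reference)
    cov = np.cov(reference.T)
    try:
        inv_cov = np.linalg.inv(cov)
    except np.linalg.LinAlgError:
        inv_cov = np.linalg.pinv(cov)
    print(np.dot(np.dot(delta, inv_cov), delta.T).diagonal())  # -> [0.5]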
from math import isqrt


def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
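# Hedged sanity check (not part of the original solution): the candidate
# sequence iterated over in `solution` is exactly the sequence of differences of
# consecutive cubes, (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1 for n = 1, 2, 3, ...
if __name__ == "__main__":
    candidates = []
    candidate, index = 7, 1
    while len(candidates) < 5:
        candidates.append(candidate)
        index += 1
        candidate += 6 * index
    assert candidates == [(n + 1) ** 3 - n**3 for n in range(1, 6)]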
import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class _snake_case ( unittest.TestCase ): def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str: __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : Optional[int] = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : List[str] = use_attention_mask __UpperCAmelCase : Dict = use_token_type_ids __UpperCAmelCase : Optional[int] = use_labels __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Dict = num_hidden_layers __UpperCAmelCase : Dict = num_attention_heads __UpperCAmelCase : Tuple = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_act __UpperCAmelCase : Tuple = hidden_dropout_prob __UpperCAmelCase : str = attention_probs_dropout_prob __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : Optional[int] = type_vocab_size __UpperCAmelCase : str = type_sequence_label_size __UpperCAmelCase : Tuple = initializer_range __UpperCAmelCase : str = num_choices def _lowerCamelCase ( self: Optional[Any] ) -> List[str]: __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : str = None if self.use_attention_mask: __UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Any = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , ) return config, input_ids, attention_mask def _lowerCamelCase ( self: str ) -> Any: __UpperCAmelCase : List[str] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs __UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class _snake_case ( _lowercase , unittest.TestCase ): lowerCamelCase__: str = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, 
FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, ) if is_flax_available() else () ) def _lowerCamelCase ( self: List[Any] ) -> Dict: __UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self ) @slow def _lowerCamelCase ( self: Tuple ) -> Optional[Any]: for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" ) __UpperCAmelCase : Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCamelCase ) @require_flax class _snake_case ( unittest.TestCase ): @slow def _lowerCamelCase ( self: int ) -> List[Any]: __UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" ) __UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) __UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0] __UpperCAmelCase : str = (1, 11, 7_68) self.assertEqual(output.shape , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
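# Hedged usage sketch mirroring the slow integration test above (assumes Flax is
# installed so that FlaxDistilBertModel was imported, and requires network
# access to fetch the distilbert-base-uncased checkpoint):
if __name__ == "__main__":
    model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
    input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
    attention_mask = np.ones_like(input_ids)
    print(model(input_ids, attention_mask=attention_mask)[0].shape)  # (1, 11, 768)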
from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _snake_case ( _lowercase ): def __init__( self: Optional[Any] , __lowerCamelCase: TransformeraDModel , __lowerCamelCase: AutoencoderKL , __lowerCamelCase: KarrasDiffusionSchedulers , __lowerCamelCase: Optional[Dict[int, str]] = None , ) -> Dict: super().__init__() self.register_modules(transformer=__lowerCamelCase , vae=__lowerCamelCase , scheduler=__lowerCamelCase ) # create a imagenet -> id dictionary for easier use __UpperCAmelCase : List[str] = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split("," ): __UpperCAmelCase : Any = int(__lowerCamelCase ) __UpperCAmelCase : List[str] = dict(sorted(self.labels.items() ) ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: Union[str, List[str]] ) -> List[int]: if not isinstance(__lowerCamelCase , __lowerCamelCase ): __UpperCAmelCase : int = list(__lowerCamelCase ) for l in label: if l not in self.labels: raise ValueError( f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self: Optional[Any] , __lowerCamelCase: List[int] , __lowerCamelCase: float = 4.0 , __lowerCamelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase: int = 50 , __lowerCamelCase: Optional[str] = "pil" , __lowerCamelCase: bool = True , ) -> Union[ImagePipelineOutput, Tuple]: __UpperCAmelCase : List[str] = len(__lowerCamelCase ) __UpperCAmelCase : List[str] = self.transformer.config.sample_size __UpperCAmelCase : str = self.transformer.config.in_channels __UpperCAmelCase : List[str] = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__lowerCamelCase , device=self.device , dtype=self.transformer.dtype , ) __UpperCAmelCase : Any = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents __UpperCAmelCase : Union[str, Any] = torch.tensor(__lowerCamelCase , device=self.device ).reshape(-1 ) __UpperCAmelCase : Dict = torch.tensor([10_00] * batch_size , device=self.device ) __UpperCAmelCase : Tuple = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(__lowerCamelCase ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: __UpperCAmelCase : Optional[Any] = latent_model_input[: len(__lowerCamelCase ) // 2] __UpperCAmelCase : List[str] = torch.cat([half, half] , dim=0 ) __UpperCAmelCase : List[str] = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = t if not torch.is_tensor(__lowerCamelCase ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) __UpperCAmelCase : List[Any] = latent_model_input.device.type == "mps" if isinstance(__lowerCamelCase , __lowerCamelCase ): __UpperCAmelCase : List[str] = torch.floataa if is_mps else torch.floataa else: __UpperCAmelCase : int = torch.intaa if is_mps else torch.intaa __UpperCAmelCase : Union[str, Any] = torch.tensor([timesteps] , dtype=__lowerCamelCase , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: __UpperCAmelCase : Tuple = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __UpperCAmelCase : Any = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output __UpperCAmelCase : Any = self.transformer( __lowerCamelCase , timestep=__lowerCamelCase , class_labels=__lowerCamelCase ).sample # perform guidance if guidance_scale > 1: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] __UpperCAmelCase , __UpperCAmelCase : Tuple = torch.split(__lowerCamelCase , len(__lowerCamelCase ) // 2 , dim=0 ) __UpperCAmelCase : str = uncond_eps + guidance_scale * (cond_eps - uncond_eps) __UpperCAmelCase : int = torch.cat([half_eps, half_eps] , dim=0 ) __UpperCAmelCase : int = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: __UpperCAmelCase , __UpperCAmelCase : Optional[int] = torch.split(__lowerCamelCase , __lowerCamelCase , dim=1 ) else: __UpperCAmelCase : Union[str, Any] = noise_pred # compute previous image: x_t -> x_t-1 __UpperCAmelCase : List[Any] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample if guidance_scale > 1: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = latent_model_input.chunk(2 , dim=0 ) else: __UpperCAmelCase : Optional[int] = latent_model_input __UpperCAmelCase : Dict = 1 / self.vae.config.scaling_factor * latents __UpperCAmelCase : List[str] = self.vae.decode(__lowerCamelCase ).sample __UpperCAmelCase : List[Any] = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __UpperCAmelCase : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __UpperCAmelCase : List[str] = self.numpy_to_pil(__lowerCamelCase ) if not return_dict: return (samples,) return ImagePipelineOutput(images=__lowerCamelCase )
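# Hedged usage sketch (not part of the original module). Upstream in diffusers
# this pipeline is exposed as `DiTPipeline`; the checkpoint id and label below
# are illustrative assumptions, and `get_label_ids` is the upstream name of the
# label-lookup helper defined above.
if __name__ == "__main__":
    from diffusers import DiTPipeline

    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    class_ids = pipe.get_label_ids(["golden retriever"])
    image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]
    image.save("dit_sample.png")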
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration _snake_case = [ # tf -> hf ('''/''', '''.'''), ('''layer_''', '''layers.'''), ('''kernel''', '''weight'''), ('''beta''', '''bias'''), ('''gamma''', '''weight'''), ('''pegasus''', '''model'''), ] _snake_case = [ ('''.output.dense''', '''.fc2'''), ('''intermediate.LayerNorm''', '''final_layer_norm'''), ('''intermediate.dense''', '''fc1'''), ] _snake_case = ( INIT_COMMON + [ ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.out_proj'''), ('''attention.self''', '''self_attn'''), ('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''), ('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''), ('''attention.encdec''', '''encoder_attn'''), ('''key''', '''k_proj'''), ('''value''', '''v_proj'''), ('''query''', '''q_proj'''), ('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''), ] + END_COMMON ) _snake_case = ( INIT_COMMON + [ ('''embeddings.word_embeddings''', '''shared.weight'''), ('''embeddings.position_embeddings''', '''embed_positions.weight'''), ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.output'''), ('''attention.self''', '''self_attn.self'''), ('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''), ] + END_COMMON ) _snake_case = [ '''encdec/key/bias''', '''encdec/query/bias''', '''encdec/value/bias''', '''self/key/bias''', '''self/query/bias''', '''self/value/bias''', '''encdec_output/dense/bias''', '''attention/output/dense/bias''', ] def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any: for tf_name, hf_name in patterns: __UpperCAmelCase : Optional[int] = k.replace(snake_case__, snake_case__ ) return k def _UpperCamelCase ( snake_case__, snake_case__ ) -> BigBirdPegasusForConditionalGeneration: __UpperCAmelCase : Dict = BigBirdPegasusConfig(**snake_case__ ) __UpperCAmelCase : Dict = BigBirdPegasusForConditionalGeneration(snake_case__ ) __UpperCAmelCase : Optional[Any] = torch_model.state_dict() __UpperCAmelCase : Optional[int] = {} # separating decoder weights __UpperCAmelCase : List[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )} __UpperCAmelCase : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )} for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ): __UpperCAmelCase : Optional[int] = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE] if any(snake_case__ ): continue __UpperCAmelCase : List[str] = DECODER_PATTERNS __UpperCAmelCase : str = rename_state_dict_key(snake_case__, snake_case__ ) if new_k not in state_dict: raise ValueError(f'''could not find new key {new_k} in state dict. 
(converted from {k})''' ) if any(True if i in k else False for i in ["dense", "query", "key", "value"] ): __UpperCAmelCase : Optional[int] = v.T __UpperCAmelCase : str = torch.from_numpy(snake_case__ ) assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ): __UpperCAmelCase : int = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE] if any(snake_case__ ): continue __UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS __UpperCAmelCase : Optional[int] = rename_state_dict_key(snake_case__, snake_case__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' ) if any(True if i in k else False for i in ["dense", "query", "key", "value"] ): __UpperCAmelCase : List[Any] = v.T __UpperCAmelCase : List[str] = torch.from_numpy(snake_case__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' __UpperCAmelCase : List[Any] = mapping["model.embed_positions.weight"] __UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" ) __UpperCAmelCase , __UpperCAmelCase : Any = torch_model.load_state_dict(snake_case__, strict=snake_case__ ) __UpperCAmelCase : str = [ k for k in missing if k not in [ "final_logits_bias", "model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight", "lm_head.weight", ] ] assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], f'''no matches found for the following tf keys {extra}''' return torch_model def _UpperCamelCase ( snake_case__ ) -> Dict: __UpperCAmelCase : Tuple = tf.train.list_variables(snake_case__ ) __UpperCAmelCase : List[str] = {} __UpperCAmelCase : str = ["global_step"] for name, shape in tqdm(snake_case__, desc="converting tf checkpoint to dict" ): __UpperCAmelCase : Tuple = any(pat in name for pat in ignore_name ) if skip_key: continue __UpperCAmelCase : Optional[Any] = tf.train.load_variable(snake_case__, snake_case__ ) __UpperCAmelCase : Tuple = array return tf_weights def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict: __UpperCAmelCase : str = get_tf_weights_as_numpy(snake_case__ ) __UpperCAmelCase : List[Any] = convert_bigbird_pegasus(snake_case__, snake_case__ ) torch_model.save_pretrained(snake_case__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''') parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''') _snake_case = parser.parse_args() _snake_case = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
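# Hedged usage note: an example invocation of this conversion script. The script
# filename is an assumption; the flags come from the argparse definition above,
# and both paths are placeholders.
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/tf_checkpoint \
#       --save_dir ./bigbird-pegasus-pytorch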
import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _snake_case ( _lowercase , unittest.TestCase ): lowerCamelCase__: Optional[int] = CodeGenTokenizer lowerCamelCase__: int = CodeGenTokenizerFast lowerCamelCase__: List[Any] = True lowerCamelCase__: str = {"add_prefix_space": True} lowerCamelCase__: List[Any] = False def _lowerCamelCase ( self: List[str] ) -> Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __UpperCAmelCase : Tuple = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] __UpperCAmelCase : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) __UpperCAmelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] __UpperCAmelCase : Tuple = {"unk_token": "<unk>"} __UpperCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__lowerCamelCase ) ) def _lowerCamelCase ( self: List[Any] , **__lowerCamelCase: Union[str, Any] ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def _lowerCamelCase ( self: List[Any] , **__lowerCamelCase: Dict ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[int] ) -> Dict: __UpperCAmelCase : Tuple = "lower newer" __UpperCAmelCase : str = "lower newer" return input_text, output_text def _lowerCamelCase ( self: Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase : int = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __UpperCAmelCase : List[str] = "lower newer" __UpperCAmelCase : List[str] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] __UpperCAmelCase : Dict = tokenizer.tokenize(__lowerCamelCase , add_prefix_space=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : int = tokens + [tokenizer.unk_token] __UpperCAmelCase : str = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def _lowerCamelCase ( self: Tuple ) -> int: if not self.test_rust_tokenizer: return __UpperCAmelCase : Any = self.get_tokenizer() __UpperCAmelCase : str = self.get_rust_tokenizer(add_prefix_space=__lowerCamelCase ) __UpperCAmelCase : Dict = "lower newer" # Testing tokenization __UpperCAmelCase : Any = tokenizer.tokenize(__lowerCamelCase , add_prefix_space=__lowerCamelCase ) __UpperCAmelCase : str = rust_tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) # Testing conversion to ids without special tokens __UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , 
add_prefix_space=__lowerCamelCase ) __UpperCAmelCase : Optional[int] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) # Testing conversion to ids with special tokens __UpperCAmelCase : Dict = self.get_rust_tokenizer(add_prefix_space=__lowerCamelCase ) __UpperCAmelCase : Tuple = tokenizer.encode(__lowerCamelCase , add_prefix_space=__lowerCamelCase ) __UpperCAmelCase : Any = rust_tokenizer.encode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) # Testing the unknown token __UpperCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token] __UpperCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def _lowerCamelCase ( self: Optional[Any] , *__lowerCamelCase: List[Any] , **__lowerCamelCase: int ) -> List[Any]: # It's very difficult to mix/test pretokenization with byte-level # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def _lowerCamelCase ( self: int , __lowerCamelCase: Any=15 ) -> List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) # Simple input __UpperCAmelCase : Dict = "This is a simple input" __UpperCAmelCase : Any = ["This is a simple input 1", "This is a simple input 2"] __UpperCAmelCase : Tuple = ("This is a simple input", "This is a pair") __UpperCAmelCase : List[Any] = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" ) # Simple input self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" ) # Simple input self.assertRaises( __lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" , ) # Pair input self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" ) # Pair input self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" ) # Pair input self.assertRaises( __lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" , ) def _lowerCamelCase ( self: Optional[Any] ) -> Tuple: __UpperCAmelCase : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input __UpperCAmelCase : Tuple = "This is a simple input" __UpperCAmelCase : Optional[int] = ["This is a simple input looooooooong", "This is a simple input"] __UpperCAmelCase : List[str] = ("This is a simple input", "This is a pair") __UpperCAmelCase : int = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] __UpperCAmelCase : str = tokenizer.pad_token_id __UpperCAmelCase : Any = tokenizer(__lowerCamelCase , padding="max_length" , max_length=30 , return_tensors="np" ) __UpperCAmelCase : Optional[Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , 
truncate=__lowerCamelCase , return_tensors="np" ) __UpperCAmelCase : List[str] = tokenizer(*__lowerCamelCase , padding="max_length" , max_length=60 , return_tensors="np" ) __UpperCAmelCase : Optional[Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , truncate=__lowerCamelCase , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def _lowerCamelCase ( self: Optional[int] ) -> Union[str, Any]: __UpperCAmelCase : Dict = "$$$" __UpperCAmelCase : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowerCamelCase , add_bos_token=__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = "This is a simple input" __UpperCAmelCase : Optional[int] = ["This is a simple input 1", "This is a simple input 2"] __UpperCAmelCase : Any = tokenizer.bos_token_id __UpperCAmelCase : Any = tokenizer(__lowerCamelCase ) __UpperCAmelCase : int = tokenizer(__lowerCamelCase ) self.assertEqual(out_s.input_ids[0] , __lowerCamelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __UpperCAmelCase : Union[str, Any] = tokenizer.decode(out_s.input_ids ) __UpperCAmelCase : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __lowerCamelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def _lowerCamelCase ( self: Dict ) -> Optional[Any]: __UpperCAmelCase : Dict = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" ) __UpperCAmelCase : List[str] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#" __UpperCAmelCase : str = "\nif len_a > len_b: result = a\nelse: result = b" __UpperCAmelCase : Tuple = tokenizer.encode(__lowerCamelCase ) __UpperCAmelCase : int = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"] __UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowerCamelCase , truncate_before_pattern=__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _lowerCamelCase ( self: str ) -> int: pass
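# Hedged usage sketch mirroring the slow test above (requires network access to
# fetch the Salesforce/codegen-350M-mono checkpoint). `truncate_before_pattern`
# cuts the decoded text at the first match of any of the given regexes.
if __name__ == "__main__":
    tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
    ids = tok.encode(text)
    print(tok.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))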
import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class _snake_case ( _lowercase ): lowerCamelCase__: Any = ["image_processor", "tokenizer"] lowerCamelCase__: Optional[Any] = "BlipImageProcessor" lowerCamelCase__: Optional[int] = "AutoTokenizer" def __init__( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict: super().__init__(__lowerCamelCase , __lowerCamelCase ) # add QFormer tokenizer __UpperCAmelCase : Dict = qformer_tokenizer def __call__( self: Any , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Dict , ) -> BatchFeature: if images is None and text is None: raise ValueError("You have to specify at least images or text." ) __UpperCAmelCase : str = BatchFeature() if text is not None: __UpperCAmelCase : Any = self.tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) encoding.update(__lowerCamelCase ) __UpperCAmelCase : Dict = self.qformer_tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) __UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" ) __UpperCAmelCase : Optional[int] = qformer_text_encoding.pop("attention_mask" ) if images is not None: __UpperCAmelCase : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase ) encoding.update(__lowerCamelCase ) return encoding def _lowerCamelCase ( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Optional[Any]: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Any , 
**__lowerCamelCase: Dict ) -> Tuple: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def _lowerCamelCase ( self: List[str] ) -> Tuple: __UpperCAmelCase : str = self.tokenizer.model_input_names __UpperCAmelCase : Dict = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> str: if os.path.isfile(__lowerCamelCase ): raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) __UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , "qformer_tokenizer" ) self.qformer_tokenizer.save_pretrained(__lowerCamelCase ) return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase ) @classmethod def _lowerCamelCase ( cls: Tuple , __lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]: __UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" ) __UpperCAmelCase : List[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase ) args.append(__lowerCamelCase ) return cls(*__lowerCamelCase )
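# Hedged usage sketch (not part of the original module). Upstream this processor
# is `InstructBlipProcessor`; the checkpoint id below is an assumption for
# illustration and requires network access.
if __name__ == "__main__":
    from PIL import Image
    from transformers import InstructBlipProcessor

    processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
    inputs = processor(
        images=Image.new("RGB", (224, 224)),
        text="What is in this image?",
        return_tensors="pt",
    )
    # Expect input_ids / attention_mask from the main tokenizer, qformer_input_ids /
    # qformer_attention_mask from the QFormer tokenizer, and pixel_values.
    print(sorted(inputs.keys()))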
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _snake_case = get_logger(__name__) def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__=0 ) -> Optional[Any]: os.makedirs(snake_case__, exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): __UpperCAmelCase : str = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __UpperCAmelCase : str = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin''' __UpperCAmelCase : List[str] = os.path.join(snake_case__, snake_case__ ) if accelerator.process_index == 0: logger.info(f'''Saving model to {output_model_file}''' ) torch.save(snake_case__, snake_case__ ) logger.info(f'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __UpperCAmelCase : Union[str, Any] = ( f'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) __UpperCAmelCase : Tuple = os.path.join(snake_case__, snake_case__ ) logger.info(f'''Saving model to {output_model_file}''' ) torch.save(snake_case__, snake_case__ ) logger.info(f'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __UpperCAmelCase : Tuple = os.path.join(snake_case__, f'''{MODEL_NAME}_{model_index}''' ) os.makedirs(snake_case__, exist_ok=snake_case__ ) logger.info(f'''Saving model to {ckpt_dir}''' ) __UpperCAmelCase : Dict = {"model": state_dict} dist_cp.save_state_dict( state_dict=snake_case__, storage_writer=dist_cp.FileSystemWriter(snake_case__ ), planner=DefaultSavePlanner(), ) logger.info(f'''Model saved to {ckpt_dir}''' ) def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__=0 ) -> str: accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(snake_case__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return __UpperCAmelCase : int = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin''' __UpperCAmelCase : str = os.path.join(snake_case__, snake_case__ ) logger.info(f'''Loading model from {input_model_file}''' ) __UpperCAmelCase : int = torch.load(snake_case__ ) logger.info(f'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __UpperCAmelCase : Tuple = ( f'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else 
f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) __UpperCAmelCase : List[str] = os.path.join(snake_case__, snake_case__ ) logger.info(f'''Loading model from {input_model_file}''' ) __UpperCAmelCase : Dict = torch.load(snake_case__ ) logger.info(f'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __UpperCAmelCase : str = ( os.path.join(snake_case__, f'''{MODEL_NAME}_{model_index}''' ) if f'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(f'''Loading model from {ckpt_dir}''' ) __UpperCAmelCase : Optional[Any] = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=snake_case__, storage_reader=dist_cp.FileSystemReader(snake_case__ ), planner=DefaultLoadPlanner(), ) __UpperCAmelCase : str = state_dict["model"] logger.info(f'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(snake_case__ ) def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__=0 ) -> Any: os.makedirs(snake_case__, exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): __UpperCAmelCase : int = FSDP.optim_state_dict(snake_case__, snake_case__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __UpperCAmelCase : str = ( f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) __UpperCAmelCase : Optional[Any] = os.path.join(snake_case__, snake_case__ ) logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(snake_case__, snake_case__ ) logger.info(f'''Optimizer state saved in {output_optimizer_file}''' ) else: __UpperCAmelCase : List[Any] = os.path.join(snake_case__, f'''{OPTIMIZER_NAME}_{optimizer_index}''' ) os.makedirs(snake_case__, exist_ok=snake_case__ ) logger.info(f'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={"optimizer": optim_state}, storage_writer=dist_cp.FileSystemWriter(snake_case__ ), planner=DefaultSavePlanner(), ) logger.info(f'''Optimizer state saved in {ckpt_dir}''' ) def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__=0 ) -> Union[str, Any]: accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __UpperCAmelCase : Optional[int] = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __UpperCAmelCase : Union[str, Any] = ( f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) __UpperCAmelCase : int = os.path.join(snake_case__, snake_case__ ) logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' ) __UpperCAmelCase : Dict = torch.load(snake_case__ ) logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' ) else: __UpperCAmelCase : int = ( os.path.join(snake_case__, f'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if f'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(f'''Loading Optimizer from {ckpt_dir}''' ) __UpperCAmelCase : Any = load_sharded_optimizer_state_dict( 
model_state_dict=model.state_dict(), optimizer_key="optimizer", storage_reader=dist_cp.FileSystemReader(snake_case__ ), ) __UpperCAmelCase : Tuple = optim_state["optimizer"] logger.info(f'''Optimizer loaded from {ckpt_dir}''' ) __UpperCAmelCase : Optional[Any] = FSDP.optim_state_dict_to_load(snake_case__, snake_case__, snake_case__ ) optimizer.load_state_dict(snake_case__ )
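# Hedged illustration (not part of the original utilities): the checkpoint
# file-naming scheme the save/load helpers above agree on, reproduced standalone.
# The value of MODEL_NAME here is an assumption ("pytorch_model" in accelerate's
# constants); no FSDP setup is needed to run this.
if __name__ == "__main__":
    model_name = "pytorch_model"  # assumed value of MODEL_NAME
    for model_index in (0, 1):
        full = f"{model_name}.bin" if model_index == 0 else f"{model_name}_{model_index}.bin"
        local = (
            f"{model_name}_rank0.bin"
            if model_index == 0
            else f"{model_name}_{model_index}_rank0.bin"
        )
        print(f"FULL_STATE_DICT: {full}  LOCAL_STATE_DICT: {local}")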
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _snake_case = logging.get_logger(__name__) _snake_case = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } _snake_case = { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } _snake_case = {'''facebook/blenderbot-3B''': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _UpperCamelCase ( ) -> Dict: __UpperCAmelCase : Tuple = ( list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) ) ) __UpperCAmelCase : str = bs[:] __UpperCAmelCase : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case__ ) cs.append(2**8 + n ) n += 1 __UpperCAmelCase : Optional[Any] = [chr(snake_case__ ) for n in cs] return dict(zip(snake_case__, snake_case__ ) ) def _UpperCamelCase ( snake_case__ ) -> Any: __UpperCAmelCase : List[Any] = set() __UpperCAmelCase : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase : Union[str, Any] = char return pairs class _snake_case ( _lowercase ): lowerCamelCase__: str = VOCAB_FILES_NAMES lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__: Dict = ["input_ids", "attention_mask"] def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]: __UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token __UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token __UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token __UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token __UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token __UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else 
pad_token # Mask token behave like a normal word, i.e. include the space before it __UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: __UpperCAmelCase : List[Any] = json.load(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()} __UpperCAmelCase : Dict = errors # how to handle errors in decoding __UpperCAmelCase : Optional[int] = bytes_to_unicode() __UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: __UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1] __UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges] __UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Tuple = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _lowerCamelCase ( self: Dict ) -> Any: return len(self.encoder ) def _lowerCamelCase ( self: Optional[Any] ) -> List[str]: return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCamelCase ( self: int , __lowerCamelCase: List[Any] ) -> Union[str, Any]: if token in self.cache: return self.cache[token] __UpperCAmelCase : List[Any] = tuple(__lowerCamelCase ) __UpperCAmelCase : Dict = get_pairs(__lowerCamelCase ) if not pairs: return token while True: __UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : str = 0 while i < len(__lowerCamelCase ): try: __UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase : Union[str, Any] = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase : List[Any] = tuple(__lowerCamelCase ) __UpperCAmelCase : str = new_word if len(__lowerCamelCase ) == 1: break else: __UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = word return word def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict: __UpperCAmelCase : Any = [] for token in re.findall(self.pat , __lowerCamelCase ): __UpperCAmelCase : int = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our 
case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict: return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]: return self.decoder.get(__lowerCamelCase ) def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int: __UpperCAmelCase : Dict = "".join(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __UpperCAmelCase : Dict = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) __UpperCAmelCase : Optional[Any] = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) __UpperCAmelCase : Optional[Any] = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]: __UpperCAmelCase : int = [self.sep_token_id] __UpperCAmelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]: __UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): __UpperCAmelCase : Optional[Any] = " " + text return (text, kwargs) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]: return token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]: __UpperCAmelCase : Tuple = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix 
as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: __UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
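The tokenizer above is a GPT-2 style byte-level BPE whose only special-token quirk is appending a single trailing `</s>` (see `build_inputs_with_special_tokens`, which returns `token_ids_a + [self.eos_token_id]`). A minimal usage sketch through the public transformers API follows; the checkpoint name is only an illustrative choice.

from transformers import BlenderbotTokenizer

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
ids = tokenizer("Hello world").input_ids
# the sequence ends with </s> only; no leading <s> is added
print(tokenizer.convert_ids_to_tokens(ids))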
342
1
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return every prime up to and including num (sieve of Eratosthenes)."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # strike out every multiple of p starting from p * p
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
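A couple of spot checks for the sieve, with expected outputs worked out by hand:

assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]
assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]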
342
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _snake_case ( _lowercase , unittest.TestCase ): lowerCamelCase__: List[Any] = CanineTokenizer lowerCamelCase__: Optional[int] = False def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: super().setUp() __UpperCAmelCase : Tuple = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]: return CanineTokenizer.from_pretrained("google/canine-s" ) def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer: __UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) __UpperCAmelCase : Optional[int] = 10_24 return tokenizer @require_torch def _lowerCamelCase ( self: List[str] ) -> int: __UpperCAmelCase : Union[str, Any] = self.canine_tokenizer __UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."] # fmt: off __UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on __UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def _lowerCamelCase ( self: Optional[Any] ) -> Tuple: __UpperCAmelCase : Optional[Any] = self.canine_tokenizer __UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."] __UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("input_ids" , __lowerCamelCase ) self.assertIn("attention_mask" , __lowerCamelCase ) self.assertIn("token_type_ids" , __lowerCamelCase ) @require_torch def _lowerCamelCase ( self: Any ) -> List[str]: __UpperCAmelCase : Optional[Any] = self.canine_tokenizer __UpperCAmelCase : int = [ "What's the weater?", "It's about 25 degrees.", ] __UpperCAmelCase : List[Any] = tokenizer( text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) def _lowerCamelCase ( self: List[Any] ) -> Tuple: # safety check on max_len default value so we are sure the test works __UpperCAmelCase : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __UpperCAmelCase : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __UpperCAmelCase : int = tempfile.mkdtemp() __UpperCAmelCase : List[Any] = 
" He is very happy, UNwant\u00E9d,running" __UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) tokenizer.save_pretrained(__lowerCamelCase ) __UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase ) __UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) shutil.rmtree(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __UpperCAmelCase : List[Any] = tempfile.mkdtemp() __UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running" __UpperCAmelCase : str = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: __UpperCAmelCase : Tuple = chr(0xE_0_0_7 ) additional_special_tokens.append(__lowerCamelCase ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) __UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) tokenizer.save_pretrained(__lowerCamelCase ) __UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__lowerCamelCase ) def _lowerCamelCase ( self: str ) -> Optional[int]: __UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_clean_sequence(__lowerCamelCase ) # a special token for Canine can be defined as follows: __UpperCAmelCase : int = 0xE_0_0_5 __UpperCAmelCase : Tuple = chr(__lowerCamelCase ) tokenizer.add_special_tokens({"cls_token": special_token} ) __UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(len(__lowerCamelCase ) , 1 ) __UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) __UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) __UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(__lowerCamelCase , input_encoded + special_token_id ) __UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) self.assertTrue(special_token not in decoded ) def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]: __UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase : Optional[int] = chr(0xE_0_0_5 ) __UpperCAmelCase : List[str] = chr(0xE_0_0_6 
) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} ) __UpperCAmelCase : Tuple = tokenizer.tokenize(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase ) self.assertEqual(len(__lowerCamelCase ) , 1 ) self.assertEqual(len(__lowerCamelCase ) , 1 ) self.assertEqual(token_a[0] , __lowerCamelCase ) self.assertEqual(token_a[0] , __lowerCamelCase ) @require_tokenizers def _lowerCamelCase ( self: str ) -> Union[str, Any]: __UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: __UpperCAmelCase : Union[str, Any] = 0xE_0_0_6 __UpperCAmelCase : int = chr(__lowerCamelCase ) __UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase ) tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__lowerCamelCase ) tokenizer.from_pretrained(__lowerCamelCase ) def _lowerCamelCase ( self: Dict ) -> List[str]: __UpperCAmelCase : str = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__lowerCamelCase ) with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file: __UpperCAmelCase : Tuple = json.load(__lowerCamelCase ) with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file: __UpperCAmelCase : Optional[int] = json.load(__lowerCamelCase ) # a special token for Canine can be defined as follows: __UpperCAmelCase : Any = 0xE_0_0_6 __UpperCAmelCase : Union[str, Any] = chr(__lowerCamelCase ) __UpperCAmelCase : Dict = [new_token_a] __UpperCAmelCase : int = [new_token_a] with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(__lowerCamelCase , __lowerCamelCase ) with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(__lowerCamelCase , __lowerCamelCase ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 ) self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) __UpperCAmelCase : List[Any] = 0xE_0_0_7 __UpperCAmelCase : List[Any] = chr(__lowerCamelCase ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __UpperCAmelCase : str = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )] __UpperCAmelCase : Dict = tokenizer_class.from_pretrained( __lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 ) self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase : int = "hello world" if self.space_between_special_tokens: __UpperCAmelCase : Any = "[CLS] hello world [SEP]" else: __UpperCAmelCase : Union[str, Any] = input __UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) __UpperCAmelCase : Any = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__lowerCamelCase , [output, output.lower()] ) def _lowerCamelCase ( self: Dict ) -> Any: __UpperCAmelCase : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase : List[str] = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] __UpperCAmelCase : List[str] = "a" __UpperCAmelCase : Any = ord(__lowerCamelCase ) for attr in attributes_list: setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase ) self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase ) setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase ) self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase ) setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] ) self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] ) self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] ) __UpperCAmelCase : Tuple = 0xE_0_0_6 __UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase ) setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] ) self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] ) self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , 
[additional_special_token_id] ) def _lowerCamelCase ( self: str ) -> Union[str, Any]: pass def _lowerCamelCase ( self: Any ) -> Any: pass def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple: pass def _lowerCamelCase ( self: Optional[int] ) -> Any: pass def _lowerCamelCase ( self: List[str] ) -> str: pass def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]: pass def _lowerCamelCase ( self: Optional[Any] ) -> Tuple: pass def _lowerCamelCase ( self: str ) -> Tuple: pass
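For context on what the tests above exercise: CANINE tokenizes straight to Unicode code points, with special tokens drawn from the private-use area ([CLS] = 0xE000 = 57344, [SEP] = 0xE001 = 57345, matching the expected ids in the first test). A small sketch:

from transformers import CanineTokenizer

tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
# "h" -> 104, "i" -> 105, wrapped in [CLS] ... [SEP]
print(tokenizer("hi").input_ids)  # [57344, 104, 105, 57345]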
342
1
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph as adjacency lists over vertices_number nodes."""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from i to j
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add the edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph on vertices_number nodes."""
    return {i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)}


if __name__ == "__main__":
    import doctest

    doctest.testmod()
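A reproducible usage sketch for the generator above (assuming the restored names `random_graph` and `complete_graph`; `complete_graph` is confirmed by its own call site):

import random

random.seed(0)  # fix the seed so the example is reproducible
print(random_graph(4, 0.5))                 # undirected: every edge appears in both lists
print(random_graph(4, 0.5, directed=True))  # directed: edges only go from lower to higher index
print(complete_graph(3))                    # {0: [1, 2], 1: [0, 2], 2: [0, 1]}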
342
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """Logger adapter that is aware of the multiple processes launched by Accelerate."""

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                # emit from each rank in turn so interleaved output stays readable
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
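A usage sketch of the adapter through Accelerate's public `get_logger` helper; the state must be initialized first, exactly as the RuntimeError above demands:

from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes PartialState
logger = get_logger(__name__)
logger.info("logged once, by the main process only")
logger.info("logged by every rank, in order", main_process_only=False, in_order=True)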
342
1
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including num, sieving only up to sqrt(num)."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start to be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    # everything above sqrt(num) that survived the sieve is prime
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
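Two quick checks of the square-root-bounded sieve above:

assert prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]
assert prime_sieve(2) == [2]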
342
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
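The reader above is the internal machinery behind the public `text` loader; a consumer-side sketch (the file name is only a placeholder):

from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
print(ds[0]["text"])  # each line of the file becomes one example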
342
1
import re


def indian_phone_validator(phone: str) -> bool:
    """Validate an Indian mobile number, with or without a +91/0/91 prefix."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        # ensure the match covers the whole input string
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
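A few spot checks against the pattern above (an optional `+91`, `0`, or `91` prefix followed by a 10-digit number starting with 7, 8, or 9):

assert indian_phone_validator("+918827897895") is True
assert indian_phone_validator("9876543210") is True   # bare 10-digit mobile number
assert indian_phone_validator("12345") is False       # too short, wrong leading digit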
342
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
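The `_LazyModule` indirection defers the heavy torch import until an attribute is actually touched. A minimal sketch of the same idea with PEP 562's module-level `__getattr__`, independent of transformers' private helper:

def __getattr__(name: str):
    # import the real object only on first access
    if name == "TrajectoryTransformerConfig":
        from .configuration_trajectory_transformer import TrajectoryTransformerConfig

        return TrajectoryTransformerConfig
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")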
342
1
342
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]: __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : Optional[int] = image_size __UpperCAmelCase : List[str] = num_channels __UpperCAmelCase : Union[str, Any] = num_stages __UpperCAmelCase : List[str] = hidden_sizes __UpperCAmelCase : Any = depths __UpperCAmelCase : Optional[int] = is_training __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[int] = intermediate_size __UpperCAmelCase : Optional[Any] = hidden_act __UpperCAmelCase : Union[str, Any] = num_labels __UpperCAmelCase : Any = initializer_range __UpperCAmelCase : List[str] = out_features __UpperCAmelCase : Tuple = out_indices __UpperCAmelCase : List[Any] = scope def _lowerCamelCase ( self: List[Any] ) -> Optional[int]: __UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : List[str] = None if self.use_labels: __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase : Optional[Any] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self: Tuple ) -> List[Any]: return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]: __UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : List[str] = model(__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], 
self.image_size // 32, self.image_size // 32) , ) def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple: __UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Any = model(__lowerCamelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _lowerCamelCase ( self: int ) -> List[str]: __UpperCAmelCase : int = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs __UpperCAmelCase : str = {"pixel_values": pixel_values} return config, inputs_dict def _lowerCamelCase ( self: List[Any] ) -> List[Any]: __UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class _snake_case ( _lowercase , _lowercase , unittest.TestCase ): lowerCamelCase__: Dict = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowerCamelCase__: str = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) lowerCamelCase__: Tuple = False lowerCamelCase__: int = False lowerCamelCase__: Dict = False lowerCamelCase__: int = False lowerCamelCase__: Any = False def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]: __UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self ) __UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def _lowerCamelCase ( self: Dict ) -> Tuple: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() 
self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self: List[Any] ) -> int: return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def _lowerCamelCase ( self: Any ) -> Any: pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def _lowerCamelCase ( self: str ) -> Optional[Any]: pass def _lowerCamelCase ( self: List[Any] ) -> int: if not self.model_tester.is_training: return for model_class in self.all_model_classes: __UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels() __UpperCAmelCase : Optional[Any] = True if model_class.__name__ in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ]: continue __UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() __UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) __UpperCAmelCase : Any = model(**__lowerCamelCase ).loss loss.backward() def _lowerCamelCase ( self: Optional[int] ) -> Dict: if not self.model_tester.is_training: return for model_class in self.all_model_classes: __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels() __UpperCAmelCase : List[str] = False __UpperCAmelCase : int = True if ( model_class.__name__ in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )] or not model_class.supports_gradient_checkpointing ): continue __UpperCAmelCase : int = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.gradient_checkpointing_enable() model.train() __UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) __UpperCAmelCase : Any = model(**__lowerCamelCase ).loss loss.backward() def _lowerCamelCase ( self: List[str] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : str = model_class(__lowerCamelCase ) __UpperCAmelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : List[Any] = [*signature.parameters.keys()] __UpperCAmelCase : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _lowerCamelCase ( self: str ) -> List[Any]: __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _lowerCamelCase ( self: Union[str, Any] ) -> Dict: def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ): __UpperCAmelCase : Any = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) __UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : Optional[int] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, 
self.model_tester.image_size // 4] , ) __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Optional[int] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Any = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def _lowerCamelCase ( self: Dict ) -> List[Any]: for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def _UpperCamelCase ( ) -> List[Any]: __UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self: Optional[int] ) -> Dict: return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def _lowerCamelCase ( self: List[Any] ) -> Tuple: __UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase ) __UpperCAmelCase : List[str] = self.default_image_processor __UpperCAmelCase : Optional[Any] = prepare_img() __UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): __UpperCAmelCase : str = model(**__lowerCamelCase ) # verify the logits __UpperCAmelCase : Dict = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) __UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
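An end-to-end inference sketch matching the integration test above; the checkpoint is the one the test itself loads, while the image path is only a placeholder:

import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")

image = Image.open("cat.png").convert("RGB")  # placeholder path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000), ImageNet-1k classes
print(model.config.id2label[logits.argmax(-1).item()])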
342
1
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf _snake_case = logging.get_logger(__name__) @dataclass class _snake_case ( _lowercase ): lowerCamelCase__: Dict = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self: Dict , **__lowerCamelCase: Union[str, Any] ) -> int: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __UpperCAmelCase : Optional[Any] = deprecated_arg[3:] __UpperCAmelCase : Any = not kwargs.pop(__lowerCamelCase ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) __UpperCAmelCase : Union[str, Any] = kwargs.pop("tpu_name" , self.tpu_name ) __UpperCAmelCase : List[str] = kwargs.pop("device_idx" , self.device_idx ) __UpperCAmelCase : str = kwargs.pop("eager_mode" , self.eager_mode ) __UpperCAmelCase : List[Any] = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**__lowerCamelCase ) lowerCamelCase__: str = field( default=_lowercase , metadata={"help": "Name of TPU"} , ) lowerCamelCase__: int = field( default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , ) lowerCamelCase__: bool = field(default=_lowercase , metadata={"help": "Benchmark models in eager model."} ) lowerCamelCase__: bool = field( default=_lowercase , metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." } , ) @cached_property def _lowerCamelCase ( self: List[str] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["tf"] ) __UpperCAmelCase : List[Any] = None if self.tpu: try: if self.tpu_name: __UpperCAmelCase : Union[str, Any] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __UpperCAmelCase : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __UpperCAmelCase : int = None return tpu @cached_property def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __UpperCAmelCase : Dict = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) __UpperCAmelCase : int = tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU __UpperCAmelCase : str = tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' ) return strategy @property def _lowerCamelCase ( self: Tuple ) -> bool: requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def _lowerCamelCase ( self: Union[str, Any] ) -> "tf.distribute.Strategy": requires_backends(self , ["tf"] ) return self._setup_strategy @property def _lowerCamelCase ( self: Optional[Any] ) -> str: requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def _lowerCamelCase ( self: List[str] ) -> int: requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def 
_lowerCamelCase ( self: Union[str, Any] ) -> bool: return self.n_gpu > 0
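Assuming the dataclass above is transformers' `TensorFlowBenchmarkArguments`, a hedged usage sketch of the (since deprecated) benchmark runner:

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128], eager_mode=True
)
results = TensorFlowBenchmark(args).run()  # reports speed and memory per model/shape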
342
import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _snake_case = logging.get_logger(__name__) _snake_case = { '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''', # See all DETR models at https://huggingface.co/models?filter=detr } class _snake_case ( _lowercase ): lowerCamelCase__: str = "detr" lowerCamelCase__: Dict = ["past_key_values"] lowerCamelCase__: str = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) __UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): __UpperCAmelCase : List[Any] = backbone_config.get("model_type" ) __UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type] __UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase ) # set timm attributes to None __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None __UpperCAmelCase : Any = use_timm_backbone __UpperCAmelCase : Optional[Any] = backbone_config __UpperCAmelCase : Optional[Any] = num_channels __UpperCAmelCase : List[Any] = num_queries __UpperCAmelCase : Optional[int] = d_model __UpperCAmelCase : Optional[Any] = encoder_ffn_dim __UpperCAmelCase : Dict = encoder_layers __UpperCAmelCase : List[Any] = encoder_attention_heads __UpperCAmelCase : int = decoder_ffn_dim __UpperCAmelCase : Tuple = decoder_layers __UpperCAmelCase : int = decoder_attention_heads __UpperCAmelCase : List[Any] = dropout __UpperCAmelCase : Dict = attention_dropout __UpperCAmelCase : Optional[Any] = activation_dropout __UpperCAmelCase : int = activation_function __UpperCAmelCase : Any = init_std __UpperCAmelCase : str = init_xavier_std __UpperCAmelCase : int = encoder_layerdrop __UpperCAmelCase : Tuple = decoder_layerdrop __UpperCAmelCase : List[Any] = encoder_layers __UpperCAmelCase : Optional[Any] = auxiliary_loss __UpperCAmelCase : int = position_embedding_type __UpperCAmelCase : Optional[int] = backbone __UpperCAmelCase : str = use_pretrained_backbone __UpperCAmelCase : Dict = dilation # Hungarian matcher __UpperCAmelCase : Optional[int] = class_cost __UpperCAmelCase : Optional[Any] = bbox_cost __UpperCAmelCase : Optional[int] = giou_cost # Loss coefficients __UpperCAmelCase : Any = mask_loss_coefficient __UpperCAmelCase : Any = dice_loss_coefficient __UpperCAmelCase : Any = bbox_loss_coefficient __UpperCAmelCase : Optional[int] = giou_loss_coefficient __UpperCAmelCase : Optional[Any] = eos_coefficient super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase ) @property def _lowerCamelCase ( self: Dict ) -> int: return self.encoder_attention_heads @property def _lowerCamelCase ( self: str ) -> int: return self.d_model @classmethod def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]: return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self: str ) -> Dict[str, any]: __UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: __UpperCAmelCase : int = self.backbone_config.to_dict() __UpperCAmelCase : List[str] = self.__class__.model_type return output class _snake_case ( _lowercase ): lowerCamelCase__: Optional[int] = version.parse("1.11" ) @property def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _lowerCamelCase ( self: Optional[Any] ) -> float: return 1e-5 @property def _lowerCamelCase ( self: List[str] ) -> int: return 12
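# Hedged sketch: instantiating the configuration above, assuming it is DetrConfig from
# transformers. The attribute_map at the top of the class forwards hidden_size to
# d_model and num_attention_heads to encoder_attention_heads, which this demonstrates.
from transformers import DetrConfig

config = DetrConfig(d_model=256, encoder_attention_heads=8)
assert config.hidden_size == 256  # alias resolved through attribute_map
assert config.num_attention_heads == 8
print(config.model_type)  # "detr"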
def _UpperCamelCase ( snake_case__ ) -> int: __UpperCAmelCase : List[Any] = [1] __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = 0, 0, 0 __UpperCAmelCase : Tuple = ugly_nums[ia] * 2 __UpperCAmelCase : Dict = ugly_nums[ia] * 3 __UpperCAmelCase : Optional[int] = ugly_nums[ia] * 5 for _ in range(1, snake_case__ ): __UpperCAmelCase : List[Any] = min(snake_case__, snake_case__, snake_case__ ) ugly_nums.append(snake_case__ ) if next_num == next_a: ia += 1 __UpperCAmelCase : List[Any] = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 __UpperCAmelCase : List[Any] = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 __UpperCAmelCase : Optional[int] = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(F'{ugly_numbers(200) = }')
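# A quick sanity sketch for the three-pointer merge above, assuming the function is
# exposed as ugly_numbers, as the __main__ block suggests. Ugly numbers are those whose
# only prime factors are 2, 3 and 5; the sequence starts
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 10th ugly number is 12.
assert ugly_numbers(1) == 1
assert ugly_numbers(10) == 12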
from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=1e-1_2 ) -> str: __UpperCAmelCase : Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T __UpperCAmelCase : int = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T return jnp.matmul(snake_case__, norm_emb_a.T ) class _snake_case ( nn.Module ): lowerCamelCase__: CLIPConfig lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: Any ) -> Tuple: __UpperCAmelCase : List[str] = FlaxCLIPVisionModule(self.config.vision_config ) __UpperCAmelCase : Any = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype ) __UpperCAmelCase : int = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) ) __UpperCAmelCase : int = self.param( "special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) ) __UpperCAmelCase : Tuple = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) ) __UpperCAmelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) ) def __call__( self: List[Any] , __lowerCamelCase: Dict ) -> Dict: __UpperCAmelCase : Optional[int] = self.vision_model(__lowerCamelCase )[1] __UpperCAmelCase : List[str] = self.visual_projection(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds ) __UpperCAmelCase : Optional[Any] = jax_cosine_distance(__lowerCamelCase , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs __UpperCAmelCase : List[str] = 0.0 __UpperCAmelCase : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment __UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 ) __UpperCAmelCase : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase ) # Use a lower threshold if an image has any special care concept __UpperCAmelCase : List[Any] = is_special_care * 0.01 __UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment __UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 ) __UpperCAmelCase : Any = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class _snake_case ( _lowercase ): lowerCamelCase__: int = CLIPConfig lowerCamelCase__: Tuple = "clip_input" lowerCamelCase__: str = FlaxStableDiffusionSafetyCheckerModule def __init__( self: Union[str, Any] , __lowerCamelCase: CLIPConfig , __lowerCamelCase: Optional[Tuple] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: jnp.dtype = jnp.floataa , __lowerCamelCase: bool = True , **__lowerCamelCase: Optional[int] , ) -> int: if input_shape is None: __UpperCAmelCase : Dict = (1, 2_24, 2_24, 3) __UpperCAmelCase : Tuple = self.module_class(config=__lowerCamelCase , dtype=__lowerCamelCase , **__lowerCamelCase ) super().__init__(__lowerCamelCase , __lowerCamelCase , input_shape=__lowerCamelCase , seed=__lowerCamelCase , dtype=__lowerCamelCase , _do_init=_do_init ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: jax.random.KeyArray , __lowerCamelCase: Tuple , __lowerCamelCase: FrozenDict = None ) -> FrozenDict: # 
init input tensor __UpperCAmelCase : Tuple = jax.random.normal(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Dict = jax.random.split(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng} __UpperCAmelCase : str = self.module.init(__lowerCamelCase , __lowerCamelCase )["params"] return random_params def __call__( self: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: dict = None , ) -> List[Any]: __UpperCAmelCase : int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) ) return self.module.apply( {"params": params or self.params} , jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
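# Hedged sketch of the normalized-cosine helper used above: each embedding matrix is
# L2-normalized row-wise with a clipped norm for numerical stability, then the two are
# multiplied so every entry is a cosine similarity between one image embedding and one
# concept embedding. Names here are illustrative, not the module's public API.
import jax.numpy as jnp

def cosine_similarity(emb_1, emb_2, eps=1e-12):
    norm_1 = emb_1 / jnp.clip(jnp.linalg.norm(emb_1, axis=1, keepdims=True), a_min=eps)
    norm_2 = emb_2 / jnp.clip(jnp.linalg.norm(emb_2, axis=1, keepdims=True), a_min=eps)
    return norm_1 @ norm_2.T

print(cosine_similarity(jnp.ones((2, 4)), jnp.ones((3, 4))))  # all entries ~1.0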
from __future__ import annotations import math _snake_case = '''2020.9.26''' _snake_case = '''xcodz-dot, cclaus, dhruvmanila''' def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) -> tuple[float, float]: if not all(isinstance(snake_case__, (float, int) ) for val in locals().values() ): __UpperCAmelCase : List[str] = f'''Input values must either be float or int: {list(locals().values() )}''' raise TypeError(snake_case__ ) __UpperCAmelCase : Any = ((x * distance) / (z + distance)) * scale __UpperCAmelCase : Optional[int] = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) -> tuple[float, float, float]: if not isinstance(snake_case__, snake_case__ ): raise TypeError("Axis must be a str" ) __UpperCAmelCase : Dict = locals() del input_variables["axis"] if not all(isinstance(snake_case__, (float, int) ) for val in input_variables.values() ): __UpperCAmelCase : List[Any] = ( "Input values except axis must either be float or int: " f'''{list(input_variables.values() )}''' ) raise TypeError(snake_case__ ) __UpperCAmelCase : List[Any] = (angle % 360) / 450 * 180 / math.pi if axis == "z": __UpperCAmelCase : Tuple = x * math.cos(snake_case__ ) - y * math.sin(snake_case__ ) __UpperCAmelCase : int = y * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) __UpperCAmelCase : Tuple = z elif axis == "x": __UpperCAmelCase : Optional[Any] = y * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) __UpperCAmelCase : Dict = z * math.cos(snake_case__ ) + y * math.sin(snake_case__ ) __UpperCAmelCase : int = x elif axis == "y": __UpperCAmelCase : Optional[int] = x * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) __UpperCAmelCase : Union[str, Any] = z * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) __UpperCAmelCase : Tuple = y else: raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'" ) return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(F'{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }') print(F'{rotate(1.0, 2.0, 3.0, "y", 9_0.0) = }')
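# Worked example for the perspective projection above, assuming the first function is
# callable as convert_to_ad, the name the __main__ block prints. With
# (x, y, z) = (1, 2, 3), distance = 10 and scale = 10:
#   projected_x = (1 * 10) / (3 + 10) * 10 = 100 / 13 ≈ 7.6923
#   projected_y = (2 * 10) / (3 + 10) * 10 = 200 / 13 ≈ 15.3846
px, py = convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0)
assert abs(px - 100 / 13) < 1e-9 and abs(py - 200 / 13) < 1e-9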
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def _UpperCamelCase ( snake_case__ ) -> Tuple: __UpperCAmelCase : Union[str, Any] = 384 if "tiny" in model_name: __UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3] __UpperCAmelCase : List[Any] = [96, 192, 384, 768] if "small" in model_name: __UpperCAmelCase : Tuple = [3, 3, 27, 3] __UpperCAmelCase : Any = [96, 192, 384, 768] if "base" in model_name: __UpperCAmelCase : str = [3, 3, 27, 3] __UpperCAmelCase : str = [128, 256, 512, 1024] __UpperCAmelCase : str = 512 if "large" in model_name: __UpperCAmelCase : Dict = [3, 3, 27, 3] __UpperCAmelCase : int = [192, 384, 768, 1536] __UpperCAmelCase : Dict = 768 if "xlarge" in model_name: __UpperCAmelCase : List[Any] = [3, 3, 27, 3] __UpperCAmelCase : Tuple = [256, 512, 1024, 2048] __UpperCAmelCase : int = 1024 # set label information __UpperCAmelCase : List[Any] = 150 __UpperCAmelCase : str = "huggingface/label-files" __UpperCAmelCase : List[Any] = "ade20k-id2label.json" __UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) ) __UpperCAmelCase : str = {int(snake_case__ ): v for k, v in idalabel.items()} __UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} __UpperCAmelCase : int = ConvNextConfig( depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] ) __UpperCAmelCase : int = UperNetConfig( backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, ) return config def _UpperCamelCase ( snake_case__ ) -> Tuple: __UpperCAmelCase : Optional[int] = [] # fmt: off # stem rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") ) rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") ) rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") ) rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', 
f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any: __UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ ) __UpperCAmelCase : Optional[int] = val def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]: __UpperCAmelCase : Dict = { "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth", "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth", "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth", "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth", "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth", } __UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name] __UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"] __UpperCAmelCase : Dict = get_upernet_config(snake_case__ ) __UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): __UpperCAmelCase : str = state_dict.pop(snake_case__ ) if "bn" in key: __UpperCAmelCase : int = key.replace("bn", "batch_norm" ) __UpperCAmelCase : Union[str, Any] = val # rename keys __UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__, snake_case__, snake_case__ ) model.load_state_dict(snake_case__ ) # verify on image __UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" __UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" ) 
__UpperCAmelCase : str = SegformerImageProcessor() __UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values with torch.no_grad(): __UpperCAmelCase : Union[str, Any] = model(snake_case__ ) if model_name == "upernet-convnext-tiny": __UpperCAmelCase : Any = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": __UpperCAmelCase : Optional[Any] = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": __UpperCAmelCase : Dict = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": __UpperCAmelCase : Tuple = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": __UpperCAmelCase : Union[str, Any] = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print("Logits:", outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case__ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(snake_case__ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-convnext-tiny''', type=str, choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']], help='''Name of the ConvNext UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _snake_case = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
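# Hedged usage sketch mirroring the argparse entry point above: converting the tiny
# variant programmatically. The output directory is hypothetical; the call downloads
# the mmsegmentation checkpoint, remaps its keys and verifies a logits slice before
# saving.
convert_upernet_checkpoint("upernet-convnext-tiny", "./upernet-convnext-tiny", False)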
import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _snake_case = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias')) # convolutional 
projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), ('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''), ('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''), ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ] ) def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> int: __UpperCAmelCase : List[str] = state_dict.pop(snake_case__ ) __UpperCAmelCase : Optional[Any] = val def _UpperCamelCase ( snake_case__ ) -> Any: __UpperCAmelCase : Any = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: __UpperCAmelCase : int = key.replace("backbone.0.body", "backbone.conv_encoder.model" ) __UpperCAmelCase : Optional[int] = value else: __UpperCAmelCase : Dict = value return new_state_dict def _UpperCamelCase ( snake_case__ ) -> Optional[int]: __UpperCAmelCase : int = "" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) __UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) __UpperCAmelCase : Optional[int] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict __UpperCAmelCase : str = in_proj_weight[:256, :] __UpperCAmelCase : Dict = in_proj_bias[:256] __UpperCAmelCase : Union[str, Any] = in_proj_weight[256:512, :] __UpperCAmelCase : Optional[Any] = in_proj_bias[256:512] __UpperCAmelCase : int = in_proj_weight[-256:, :] __UpperCAmelCase : Tuple = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention __UpperCAmelCase : Optional[int] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) __UpperCAmelCase : List[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict __UpperCAmelCase : str = in_proj_weight[:256, :] __UpperCAmelCase : Union[str, Any] = in_proj_bias[:256] __UpperCAmelCase : str = in_proj_weight[256:512, :] __UpperCAmelCase : Tuple = in_proj_bias[256:512] __UpperCAmelCase : Dict = in_proj_weight[-256:, :] __UpperCAmelCase : Union[str, Any] = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention __UpperCAmelCase : Dict = state_dict.pop( f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) 
__UpperCAmelCase : int = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict __UpperCAmelCase : int = in_proj_weight_cross_attn[:256, :] __UpperCAmelCase : Tuple = in_proj_bias_cross_attn[:256] __UpperCAmelCase : Optional[Any] = in_proj_weight_cross_attn[256:512, :] __UpperCAmelCase : str = in_proj_bias_cross_attn[256:512] __UpperCAmelCase : List[str] = in_proj_weight_cross_attn[-256:, :] __UpperCAmelCase : str = in_proj_bias_cross_attn[-256:] def _UpperCamelCase ( snake_case__, snake_case__ ) -> List[Any]: __UpperCAmelCase , __UpperCAmelCase : List[Any] = image.size __UpperCAmelCase : Optional[Any] = max(snake_case__, snake_case__ ) __UpperCAmelCase : Optional[Any] = 800 if "detection" in checkpoint_url else 1000 __UpperCAmelCase : List[str] = target_max_size / current_max_size __UpperCAmelCase : List[Any] = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def _UpperCamelCase ( snake_case__ ) -> Tuple: __UpperCAmelCase : Tuple = F.to_tensor(snake_case__ ) __UpperCAmelCase : Dict = F.normalize(snake_case__, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Optional[Any]: logger.info("Converting model..." ) # load original state dict __UpperCAmelCase : List[Any] = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" ) # rename keys for src, dest in rename_keys: rename_key(snake_case__, snake_case__, snake_case__ ) __UpperCAmelCase : str = rename_backbone_keys(snake_case__ ) # query, key and value matrices need special treatment read_in_q_k_v(snake_case__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them __UpperCAmelCase : List[Any] = "model." 
for key in state_dict.copy().keys(): if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): __UpperCAmelCase : List[Any] = state_dict.pop(snake_case__ ) __UpperCAmelCase : Optional[int] = val # create HuggingFace model and load state dict __UpperCAmelCase : Union[str, Any] = TableTransformerConfig( backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, ) if "detection" in checkpoint_url: __UpperCAmelCase : List[Any] = 15 __UpperCAmelCase : Optional[int] = 2 __UpperCAmelCase : Optional[int] = {0: "table", 1: "table rotated"} __UpperCAmelCase : Any = idalabel __UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()} else: __UpperCAmelCase : Union[str, Any] = 125 __UpperCAmelCase : Optional[Any] = 6 __UpperCAmelCase : List[str] = { 0: "table", 1: "table column", 2: "table row", 3: "table column header", 4: "table projected row header", 5: "table spanning cell", } __UpperCAmelCase : str = idalabel __UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()} __UpperCAmelCase : Any = DetrImageProcessor( format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000 ) __UpperCAmelCase : Any = TableTransformerForObjectDetection(snake_case__ ) model.load_state_dict(snake_case__ ) model.eval() # verify our conversion __UpperCAmelCase : Optional[int] = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png" __UpperCAmelCase : Tuple = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=snake_case__ ) __UpperCAmelCase : Any = Image.open(snake_case__ ).convert("RGB" ) __UpperCAmelCase : Optional[int] = normalize(resize(snake_case__, snake_case__ ) ).unsqueeze(0 ) __UpperCAmelCase : Union[str, Any] = model(snake_case__ ) if "detection" in checkpoint_url: __UpperCAmelCase : Optional[Any] = (1, 15, 3) __UpperCAmelCase : Any = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) __UpperCAmelCase : str = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: __UpperCAmelCase : Tuple = (1, 125, 7) __UpperCAmelCase : Any = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) __UpperCAmelCase : List[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3], snake_case__, atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3], snake_case__, atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) image_processor.save_pretrained(snake_case__ ) if push_to_hub: # Push model to HF hub logger.info("Pushing model to the hub..." 
) __UpperCAmelCase : Tuple = ( "microsoft/table-transformer-detection" if "detection" in checkpoint_url else "microsoft/table-transformer-structure-recognition" ) model.push_to_hub(snake_case__ ) image_processor.push_to_hub(snake_case__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''', type=str, choices=[ '''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''', '''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''', ], help='''URL of the Table Transformer checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _snake_case = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
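# Hedged usage sketch mirroring the __main__ block above: converting the detection
# checkpoint and saving it locally without pushing to the hub. The output directory is
# hypothetical; the URL is one of the two the parser accepts.
convert_table_transformer_checkpoint(
    "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
    "./table-transformer-detection",
    False,
)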
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''', } class _snake_case ( _lowercase ): lowerCamelCase__: Dict = "roc_bert" def __init__( self: int , __lowerCamelCase: Union[str, Any]=3_05_22 , __lowerCamelCase: int=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: int=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Any=5_12 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: str=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]="absolute" , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=9_10 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=2_48_58 , __lowerCamelCase: Optional[int]=True , **__lowerCamelCase: Any , ) -> List[Any]: __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Dict = max_position_embeddings __UpperCAmelCase : Optional[Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[str] = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : Optional[int] = attention_probs_dropout_prob __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : Optional[Any] = type_vocab_size __UpperCAmelCase : List[Any] = layer_norm_eps __UpperCAmelCase : Optional[int] = use_cache __UpperCAmelCase : Optional[Any] = enable_pronunciation __UpperCAmelCase : Any = enable_shape __UpperCAmelCase : Union[str, Any] = pronunciation_embed_dim __UpperCAmelCase : Optional[Any] = pronunciation_vocab_size __UpperCAmelCase : Optional[Any] = shape_embed_dim __UpperCAmelCase : List[Any] = shape_vocab_size __UpperCAmelCase : int = concat_input __UpperCAmelCase : int = position_embedding_type __UpperCAmelCase : Optional[int] = classifier_dropout super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
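# Hedged sketch: instantiating the configuration above, assuming it is RoCBertConfig
# from transformers. The two switches below are the RoCBert-specific flags that enable
# the extra pronunciation and glyph-shape embeddings alongside the word embeddings.
from transformers import RoCBertConfig

config = RoCBertConfig(enable_pronunciation=True, enable_shape=True)
print(config.model_type, config.pronunciation_embed_dim, config.shape_embed_dim)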
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = '''▁''' _snake_case = {'''vocab_file''': '''sentencepiece.bpe.model'''} _snake_case = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model''' ), } } _snake_case = { '''facebook/nllb-200-distilled-600M''': 1024, } # fmt: off _snake_case = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', 
'''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class _snake_case ( _lowercase ): lowerCamelCase__: Optional[Any] = VOCAB_FILES_NAMES lowerCamelCase__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__: Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__: Dict = ["input_ids", "attention_mask"] lowerCamelCase__: List[int] = [] lowerCamelCase__: List[int] = [] def __init__( self: int , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict="<s>" , __lowerCamelCase: Optional[int]="</s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: Any="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Optional[Any]="<pad>" , __lowerCamelCase: int="<mask>" , __lowerCamelCase: str=None , __lowerCamelCase: Dict=None , __lowerCamelCase: str=None , __lowerCamelCase: Optional[Dict[str, Any]] = None , __lowerCamelCase: Tuple=None , __lowerCamelCase: List[str]=False , **__lowerCamelCase: List[Any] , ) -> Any: # Mask token behave like a normal word, i.e. include the space before it __UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token __UpperCAmelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs __UpperCAmelCase : Optional[int] = legacy_behaviour super().__init__( bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenizer_file=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , ) __UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCamelCase ) ) __UpperCAmelCase : Optional[int] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token __UpperCAmelCase : int = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __UpperCAmelCase : Dict = 1 __UpperCAmelCase : Union[str, Any] = len(self.sp_model ) __UpperCAmelCase : Optional[int] = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__lowerCamelCase ) } __UpperCAmelCase : Optional[Any] = {v: k for k, v in self.lang_code_to_id.items()} __UpperCAmelCase : List[str] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __UpperCAmelCase : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __UpperCAmelCase : str = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) __UpperCAmelCase : int = src_lang if src_lang is not None else "eng_Latn" __UpperCAmelCase : List[Any] = self.lang_code_to_id[self._src_lang] __UpperCAmelCase : Optional[int] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self: List[str] ) -> str: __UpperCAmelCase : Optional[int] = self.__dict__.copy() __UpperCAmelCase : Dict = None __UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto() return state def __setstate__( self: Any , __lowerCamelCase: int ) -> Optional[Any]: __UpperCAmelCase : List[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __UpperCAmelCase : Any = {} __UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def _lowerCamelCase ( self: Optional[Any] ) -> Dict: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def _lowerCamelCase ( self: Optional[int] ) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: str ) -> None: __UpperCAmelCase : str = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) __UpperCAmelCase : Dict = [1] * len(self.prefix_tokens ) __UpperCAmelCase : Any = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__lowerCamelCase )) + suffix_ones return prefix_ones + ([0] * len(__lowerCamelCase )) + ([0] * len(__lowerCamelCase )) + suffix_ones def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]: __UpperCAmelCase : str = [self.sep_token_id] __UpperCAmelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self: List[str] , __lowerCamelCase: Optional[int] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] , __lowerCamelCase: Optional[str] , **__lowerCamelCase: Dict ) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) __UpperCAmelCase : Any = src_lang __UpperCAmelCase : Optional[Any] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = self.convert_tokens_to_ids(__lowerCamelCase ) __UpperCAmelCase : int = tgt_lang_id return inputs def _lowerCamelCase ( self: Union[str, Any] ) -> int: __UpperCAmelCase : Optional[int] = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in 
range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> List[str]: return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Tuple ) -> List[str]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __UpperCAmelCase : List[Any] = self.sp_model.PieceToId(__lowerCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCamelCase ( self: str , __lowerCamelCase: int ) -> Tuple: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowerCamelCase ( self: str , __lowerCamelCase: Tuple ) -> str: __UpperCAmelCase : Union[str, Any] = "".join(__lowerCamelCase ).replace(__lowerCamelCase , " " ).strip() return out_string def _lowerCamelCase ( self: Dict , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase : int = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: __UpperCAmelCase : Dict = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,) def _lowerCamelCase ( self: str , __lowerCamelCase: List[str] , __lowerCamelCase: str = "eng_Latn" , __lowerCamelCase: Optional[List[str]] = None , __lowerCamelCase: str = "fra_Latn" , **__lowerCamelCase: Optional[Any] , ) -> BatchEncoding: __UpperCAmelCase : Tuple = src_lang __UpperCAmelCase : Tuple = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self: Tuple ) -> Any: return self.set_src_lang_special_tokens(self.src_lang ) def _lowerCamelCase ( self: Optional[Any] ) -> Any: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _lowerCamelCase ( self: Any , __lowerCamelCase: List[str] ) -> None: __UpperCAmelCase : Union[str, Any] = self.lang_code_to_id[src_lang] if self.legacy_behaviour: __UpperCAmelCase : Union[str, Any] = [] __UpperCAmelCase : Optional[Any] = [self.eos_token_id, self.cur_lang_code] else: __UpperCAmelCase : Union[str, Any] = [self.cur_lang_code] __UpperCAmelCase : Optional[Any] = [self.eos_token_id] def _lowerCamelCase ( self: str , __lowerCamelCase: str ) -> None: __UpperCAmelCase : Optional[int] = self.lang_code_to_id[lang] if self.legacy_behaviour: __UpperCAmelCase : str = [] __UpperCAmelCase : Any = [self.eos_token_id, self.cur_lang_code] else: __UpperCAmelCase : str = [self.cur_lang_code] __UpperCAmelCase : Any = [self.eos_token_id]
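# Hedged usage sketch for the tokenizer above, assuming it is NllbTokenizer from
# transformers. src_lang/tgt_lang pick the language codes that the
# set_*_lang_special_tokens methods wrap around each sequence (prefixed or appended,
# depending on legacy_behaviour).
from transformers import NllbTokenizer

tokenizer = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
inputs = tokenizer("Hello world", return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]))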
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def _UpperCamelCase ( snake_case__ ) -> int: __UpperCAmelCase : int = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: __UpperCAmelCase : int = [144, 192, 240] __UpperCAmelCase : Optional[Any] = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: __UpperCAmelCase : Optional[Any] = [96, 120, 144] __UpperCAmelCase : Tuple = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: __UpperCAmelCase : str = [64, 80, 96] __UpperCAmelCase : Optional[Any] = [16, 16, 24, 48, 64, 80, 320] __UpperCAmelCase : Tuple = 0.05 __UpperCAmelCase : Dict = 2.0 if mobilevit_name.startswith("deeplabv3_" ): __UpperCAmelCase : str = 512 __UpperCAmelCase : Any = 16 __UpperCAmelCase : str = 21 __UpperCAmelCase : Union[str, Any] = "pascal-voc-id2label.json" else: __UpperCAmelCase : Optional[Any] = 1000 __UpperCAmelCase : int = "imagenet-1k-id2label.json" __UpperCAmelCase : Dict = "huggingface/label-files" __UpperCAmelCase : int = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) ) __UpperCAmelCase : Any = {int(snake_case__ ): v for k, v in idalabel.items()} __UpperCAmelCase : int = idalabel __UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()} return config def _UpperCamelCase ( snake_case__, snake_case__=False ) -> Tuple: for i in range(1, 6 ): if f'''layer_{i}.''' in name: __UpperCAmelCase : Tuple = name.replace(f'''layer_{i}.''', f'''encoder.layer.{i - 1}.''' ) if "conv_1." in name: __UpperCAmelCase : Dict = name.replace("conv_1.", "conv_stem." ) if ".block." in name: __UpperCAmelCase : Optional[int] = name.replace(".block.", "." ) if "exp_1x1" in name: __UpperCAmelCase : Tuple = name.replace("exp_1x1", "expand_1x1" ) if "red_1x1" in name: __UpperCAmelCase : Optional[Any] = name.replace("red_1x1", "reduce_1x1" ) if ".local_rep.conv_3x3." in name: __UpperCAmelCase : Optional[int] = name.replace(".local_rep.conv_3x3.", ".conv_kxk." ) if ".local_rep.conv_1x1." in name: __UpperCAmelCase : Any = name.replace(".local_rep.conv_1x1.", ".conv_1x1." ) if ".norm." in name: __UpperCAmelCase : Dict = name.replace(".norm.", ".normalization." ) if ".conv." in name: __UpperCAmelCase : List[Any] = name.replace(".conv.", ".convolution." ) if ".conv_proj." in name: __UpperCAmelCase : List[str] = name.replace(".conv_proj.", ".conv_projection." 
) for i in range(0, 2 ): for j in range(0, 4 ): if f'''.{i}.{j}.''' in name: __UpperCAmelCase : List[Any] = name.replace(f'''.{i}.{j}.''', f'''.{i}.layer.{j}.''' ) for i in range(2, 6 ): for j in range(0, 4 ): if f'''.{i}.{j}.''' in name: __UpperCAmelCase : Any = name.replace(f'''.{i}.{j}.''', f'''.{i}.''' ) if "expand_1x1" in name: __UpperCAmelCase : Optional[int] = name.replace("expand_1x1", "downsampling_layer.expand_1x1" ) if "conv_3x3" in name: __UpperCAmelCase : List[Any] = name.replace("conv_3x3", "downsampling_layer.conv_3x3" ) if "reduce_1x1" in name: __UpperCAmelCase : Dict = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1" ) for i in range(2, 5 ): if f'''.global_rep.{i}.weight''' in name: __UpperCAmelCase : Any = name.replace(f'''.global_rep.{i}.weight''', ".layernorm.weight" ) if f'''.global_rep.{i}.bias''' in name: __UpperCAmelCase : Optional[Any] = name.replace(f'''.global_rep.{i}.bias''', ".layernorm.bias" ) if ".global_rep." in name: __UpperCAmelCase : Tuple = name.replace(".global_rep.", ".transformer." ) if ".pre_norm_mha.0." in name: __UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_mha.0.", ".layernorm_before." ) if ".pre_norm_mha.1.out_proj." in name: __UpperCAmelCase : Tuple = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense." ) if ".pre_norm_ffn.0." in name: __UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_ffn.0.", ".layernorm_after." ) if ".pre_norm_ffn.1." in name: __UpperCAmelCase : Dict = name.replace(".pre_norm_ffn.1.", ".intermediate.dense." ) if ".pre_norm_ffn.4." in name: __UpperCAmelCase : int = name.replace(".pre_norm_ffn.4.", ".output.dense." ) if ".transformer." in name: __UpperCAmelCase : Tuple = name.replace(".transformer.", ".transformer.layer." ) if ".aspp_layer." in name: __UpperCAmelCase : Any = name.replace(".aspp_layer.", "." ) if ".aspp_pool." in name: __UpperCAmelCase : Optional[Any] = name.replace(".aspp_pool.", "." ) if "seg_head." in name: __UpperCAmelCase : Optional[int] = name.replace("seg_head.", "segmentation_head." ) if "segmentation_head.classifier.classifier." in name: __UpperCAmelCase : str = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier." ) if "classifier.fc." in name: __UpperCAmelCase : Optional[Any] = name.replace("classifier.fc.", "classifier." ) elif (not base_model) and ("segmentation_head." not in name): __UpperCAmelCase : List[str] = "mobilevit." + name return name def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=False ) -> Union[str, Any]: if base_model: __UpperCAmelCase : Optional[int] = "" else: __UpperCAmelCase : Tuple = "mobilevit." for key in orig_state_dict.copy().keys(): __UpperCAmelCase : Optional[int] = orig_state_dict.pop(snake_case__ ) if key[:8] == "encoder.": __UpperCAmelCase : str = key[8:] if "qkv" in key: __UpperCAmelCase : Tuple = key.split("." 
) __UpperCAmelCase : List[Any] = int(key_split[0][6:] ) - 1 __UpperCAmelCase : Optional[Any] = int(key_split[3] ) __UpperCAmelCase : Tuple = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' ) __UpperCAmelCase : List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size __UpperCAmelCase : Optional[Any] = ( f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.''' ) if "weight" in key: __UpperCAmelCase : Any = val[:dim, :] __UpperCAmelCase : Any = val[dim : dim * 2, :] __UpperCAmelCase : List[Any] = val[-dim:, :] else: __UpperCAmelCase : List[str] = val[:dim] __UpperCAmelCase : Optional[Any] = val[dim : dim * 2] __UpperCAmelCase : List[Any] = val[-dim:] else: __UpperCAmelCase : str = val return orig_state_dict def _UpperCamelCase ( ) -> Any: __UpperCAmelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" __UpperCAmelCase : List[str] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ) return im @torch.no_grad() def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=False ) -> Optional[Any]: __UpperCAmelCase : Tuple = get_mobilevit_config(snake_case__ ) # load original state_dict __UpperCAmelCase : str = torch.load(snake_case__, map_location="cpu" ) # load 🤗 model if mobilevit_name.startswith("deeplabv3_" ): __UpperCAmelCase : Optional[int] = MobileViTForSemanticSegmentation(snake_case__ ).eval() else: __UpperCAmelCase : List[Any] = MobileViTForImageClassification(snake_case__ ).eval() __UpperCAmelCase : Dict = convert_state_dict(snake_case__, snake_case__ ) model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor __UpperCAmelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 ) __UpperCAmelCase : Any = image_processor(images=prepare_img(), return_tensors="pt" ) __UpperCAmelCase : Dict = model(**snake_case__ ) __UpperCAmelCase : Tuple = outputs.logits if mobilevit_name.startswith("deeplabv3_" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": __UpperCAmelCase : int = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": __UpperCAmelCase : Tuple = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": __UpperCAmelCase : Any = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ] ) else: raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3, :3, :3], snake_case__, atol=1e-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": __UpperCAmelCase : str = torch.tensor([-0.9866, 0.2392, -1.1241] ) elif mobilevit_name == "mobilevit_xs": __UpperCAmelCase : Tuple = torch.tensor([-2.4761, 
-0.9399, -1.9587] ) elif mobilevit_name == "mobilevit_xxs": __UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ) else: raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3], snake_case__, atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case__ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case__ ) if push_to_hub: __UpperCAmelCase : List[str] = { "mobilevit_s": "mobilevit-small", "mobilevit_xs": "mobilevit-x-small", "mobilevit_xxs": "mobilevit-xx-small", "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small", "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small", "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small", } print("Pushing to the hub..." ) __UpperCAmelCase : int = model_mapping[mobilevit_name] image_processor.push_to_hub(snake_case__, organization="apple" ) model.push_to_hub(snake_case__, organization="apple" ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--mobilevit_name''', default='''mobilevit_s''', type=str, help=( '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',''' ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.''' ), ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _snake_case = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
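# The conversion above splits each fused "qkv" projection into separate
# query/key/value tensors by slicing along the output dimension. A minimal,
# self-contained sketch of that slicing (the 3*dim-by-dim shape is a
# hypothetical stand-in for the all_head_size read from the model above):
import torch

dim = 64
qkv_weight = torch.randn(3 * dim, dim)  # fused projection, rows stacked as [q; k; v]
query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([query, key, value]), qkv_weight)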
import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def _UpperCamelCase ( snake_case__ ) -> Union[str, Any]: __UpperCAmelCase : str = SwinConfig() __UpperCAmelCase : Optional[Any] = swin_name.split("_" ) __UpperCAmelCase : List[str] = name_split[1] __UpperCAmelCase : List[Any] = int(name_split[4] ) __UpperCAmelCase : List[str] = int(name_split[3][-1] ) if model_size == "tiny": __UpperCAmelCase : Optional[Any] = 96 __UpperCAmelCase : Optional[Any] = (2, 2, 6, 2) __UpperCAmelCase : Tuple = (3, 6, 12, 24) elif model_size == "small": __UpperCAmelCase : Any = 96 __UpperCAmelCase : List[str] = (2, 2, 18, 2) __UpperCAmelCase : List[str] = (3, 6, 12, 24) elif model_size == "base": __UpperCAmelCase : Any = 128 __UpperCAmelCase : Tuple = (2, 2, 18, 2) __UpperCAmelCase : Tuple = (4, 8, 16, 32) else: __UpperCAmelCase : Optional[int] = 192 __UpperCAmelCase : List[str] = (2, 2, 18, 2) __UpperCAmelCase : Optional[Any] = (6, 12, 24, 48) if "in22k" in swin_name: __UpperCAmelCase : Optional[int] = 2_1841 else: __UpperCAmelCase : Optional[Any] = 1000 __UpperCAmelCase : Optional[int] = "huggingface/label-files" __UpperCAmelCase : Tuple = "imagenet-1k-id2label.json" __UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) ) __UpperCAmelCase : Tuple = {int(snake_case__ ): v for k, v in idalabel.items()} __UpperCAmelCase : List[str] = idalabel __UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()} __UpperCAmelCase : Tuple = img_size __UpperCAmelCase : str = num_classes __UpperCAmelCase : int = embed_dim __UpperCAmelCase : List[Any] = depths __UpperCAmelCase : int = num_heads __UpperCAmelCase : List[str] = window_size return config def _UpperCamelCase ( snake_case__ ) -> Any: if "patch_embed.proj" in name: __UpperCAmelCase : List[str] = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __UpperCAmelCase : List[str] = name.replace("patch_embed.norm", "embeddings.norm" ) if "layers" in name: __UpperCAmelCase : Optional[Any] = "encoder." + name if "attn.proj" in name: __UpperCAmelCase : Union[str, Any] = name.replace("attn.proj", "attention.output.dense" ) if "attn" in name: __UpperCAmelCase : Any = name.replace("attn", "attention.self" ) if "norm1" in name: __UpperCAmelCase : Tuple = name.replace("norm1", "layernorm_before" ) if "norm2" in name: __UpperCAmelCase : List[str] = name.replace("norm2", "layernorm_after" ) if "mlp.fc1" in name: __UpperCAmelCase : Optional[Any] = name.replace("mlp.fc1", "intermediate.dense" ) if "mlp.fc2" in name: __UpperCAmelCase : List[Any] = name.replace("mlp.fc2", "output.dense" ) if name == "norm.weight": __UpperCAmelCase : Any = "layernorm.weight" if name == "norm.bias": __UpperCAmelCase : Optional[int] = "layernorm.bias" if "head" in name: __UpperCAmelCase : str = name.replace("head", "classifier" ) else: __UpperCAmelCase : int = "swin." + name return name def _UpperCamelCase ( snake_case__, snake_case__ ) -> str: for key in orig_state_dict.copy().keys(): __UpperCAmelCase : Dict = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: __UpperCAmelCase : Any = key.split("." 
) __UpperCAmelCase : Union[str, Any] = int(key_split[1] ) __UpperCAmelCase : Optional[int] = int(key_split[3] ) __UpperCAmelCase : Union[str, Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __UpperCAmelCase : Dict = val[:dim, :] __UpperCAmelCase : Tuple = val[ dim : dim * 2, : ] __UpperCAmelCase : Any = val[-dim:, :] else: __UpperCAmelCase : Optional[int] = val[ :dim ] __UpperCAmelCase : Any = val[ dim : dim * 2 ] __UpperCAmelCase : Union[str, Any] = val[ -dim: ] else: __UpperCAmelCase : str = val return orig_state_dict def _UpperCamelCase ( snake_case__, snake_case__ ) -> List[str]: __UpperCAmelCase : Tuple = timm.create_model(snake_case__, pretrained=snake_case__ ) timm_model.eval() __UpperCAmelCase : Union[str, Any] = get_swin_config(snake_case__ ) __UpperCAmelCase : Optional[Any] = SwinForImageClassification(snake_case__ ) model.eval() __UpperCAmelCase : List[Any] = convert_state_dict(timm_model.state_dict(), snake_case__ ) model.load_state_dict(snake_case__ ) __UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" __UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-" ) ) ) __UpperCAmelCase : int = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ) __UpperCAmelCase : Tuple = image_processor(images=snake_case__, return_tensors="pt" ) __UpperCAmelCase : int = timm_model(inputs["pixel_values"] ) __UpperCAmelCase : Any = model(**snake_case__ ).logits assert torch.allclose(snake_case__, snake_case__, atol=1e-3 ) print(f'''Saving model {swin_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case__ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swin_name''', default='''swin_tiny_patch4_window7_224''', type=str, help='''Name of the Swin timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) _snake_case = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
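# get_swin_config above recovers the architecture purely from the timm model
# name. A small demonstration of that string parsing for one checkpoint name
# (note the `[-1]` indexing only handles single-digit window sizes):
name_split = "swin_tiny_patch4_window7_224".split("_")
model_size = name_split[1]  # "tiny"
img_size = int(name_split[4])  # 224
window_size = int(name_split[3][-1])  # 7
print(model_size, img_size, window_size)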
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
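# A quick Monte Carlo sanity check of the closed form above (not part of the
# original solution): sample 20 of the 70 balls without replacement and count
# distinct colours; the average should hover near float(solution(20)) ≈ 6.8187.
import random

BALLS = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]


def simulate(trials: int = 100_000) -> float:
    hits = sum(len(set(random.sample(BALLS, 20))) for _ in range(trials))
    return hits / trials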
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig _snake_case = logging.get_logger(__name__) # General docstring _snake_case = '''MobileNetV1Config''' # Base docstring _snake_case = '''google/mobilenet_v1_1.0_224''' _snake_case = [1, 1024, 7, 7] # Image classification docstring _snake_case = '''google/mobilenet_v1_1.0_224''' _snake_case = '''tabby, tabby cat''' _snake_case = [ '''google/mobilenet_v1_1.0_224''', '''google/mobilenet_v1_0.75_192''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=None ) -> Tuple: __UpperCAmelCase : Dict = {} if isinstance(snake_case__, snake_case__ ): __UpperCAmelCase : Optional[int] = model.mobilenet_va else: __UpperCAmelCase : str = model __UpperCAmelCase : Optional[Any] = "MobilenetV1/Conv2d_0/" __UpperCAmelCase : Any = backbone.conv_stem.convolution.weight __UpperCAmelCase : List[Any] = backbone.conv_stem.normalization.bias __UpperCAmelCase : List[Any] = backbone.conv_stem.normalization.weight __UpperCAmelCase : int = backbone.conv_stem.normalization.running_mean __UpperCAmelCase : Dict = backbone.conv_stem.normalization.running_var for i in range(13 ): __UpperCAmelCase : Any = i + 1 __UpperCAmelCase : int = i * 2 __UpperCAmelCase : Dict = backbone.layer[pt_index] __UpperCAmelCase : List[str] = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/''' __UpperCAmelCase : List[Any] = pointer.convolution.weight __UpperCAmelCase : str = pointer.normalization.bias __UpperCAmelCase : Dict = pointer.normalization.weight __UpperCAmelCase : Any = pointer.normalization.running_mean __UpperCAmelCase : Tuple = pointer.normalization.running_var __UpperCAmelCase : Optional[Any] = backbone.layer[pt_index + 1] __UpperCAmelCase : str = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/''' __UpperCAmelCase : Optional[int] = pointer.convolution.weight __UpperCAmelCase : Tuple = pointer.normalization.bias __UpperCAmelCase : Any = pointer.normalization.weight __UpperCAmelCase : Optional[Any] = pointer.normalization.running_mean __UpperCAmelCase : Any = pointer.normalization.running_var if isinstance(snake_case__, snake_case__ ): __UpperCAmelCase : List[str] = "MobilenetV1/Logits/Conv2d_1c_1x1/" __UpperCAmelCase : str = model.classifier.weight __UpperCAmelCase : Tuple = model.classifier.bias return tf_to_pt_map def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> List[Any]: try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise # Load weights from TF model __UpperCAmelCase : str = tf.train.list_variables(snake_case__ ) __UpperCAmelCase : Tuple = {} for name, shape in init_vars: logger.info(f'''Loading TF weight {name} with shape {shape}''' ) __UpperCAmelCase : int = tf.train.load_variable(snake_case__, snake_case__ ) __UpperCAmelCase : Tuple = array # Build TF to PyTorch weights loading map __UpperCAmelCase : Union[str, Any] = _build_tf_to_pytorch_map(snake_case__, snake_case__, snake_case__ ) for name, pointer in tf_to_pt_map.items(): logger.info(f'''Importing {name}''' ) if name not in tf_weights: logger.info(f'''{name} not in tf pre-trained weights, skipping''' ) continue __UpperCAmelCase : List[Any] = tf_weights[name] if "depthwise_weights" in name: logger.info("Transposing depthwise" ) __UpperCAmelCase : Optional[Any] = np.transpose(snake_case__, (2, 3, 0, 1) ) elif "weights" in name: logger.info("Transposing" ) if len(pointer.shape ) == 2: # copying into linear layer __UpperCAmelCase : int = array.squeeze().transpose() else: __UpperCAmelCase : Optional[Any] = np.transpose(snake_case__, (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' ) logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' ) __UpperCAmelCase : Optional[Any] = torch.from_numpy(snake_case__ ) tf_weights.pop(snake_case__, snake_case__ ) tf_weights.pop(name + "/RMSProp", snake_case__ ) tf_weights.pop(name + "/RMSProp_1", snake_case__ ) tf_weights.pop(name + "/ExponentialMovingAverage", snake_case__ ) logger.info(f'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' ) return model def _UpperCamelCase ( snake_case__, snake_case__ ) -> torch.Tensor: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = features.shape[-2:] __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = conv_layer.stride __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = conv_layer.kernel_size if in_height % stride_height == 0: __UpperCAmelCase : Optional[Any] = max(kernel_height - stride_height, 0 ) else: __UpperCAmelCase : Optional[int] = max(kernel_height - (in_height % stride_height), 0 ) if in_width % stride_width == 0: __UpperCAmelCase : Tuple = max(kernel_width - stride_width, 0 ) else: __UpperCAmelCase : Optional[Any] = max(kernel_width - (in_width % stride_width), 0 ) __UpperCAmelCase : List[str] = pad_along_width // 2 __UpperCAmelCase : List[str] = pad_along_width - pad_left __UpperCAmelCase : int = pad_along_height // 2 __UpperCAmelCase : List[str] = pad_along_height - pad_top __UpperCAmelCase : int = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(snake_case__, snake_case__, "constant", 0.0 ) class _snake_case ( nn.Module ): def __init__( self: Dict , __lowerCamelCase: MobileNetVaConfig , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] = 1 , __lowerCamelCase: Optional[int] = 1 , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[bool] = True , __lowerCamelCase: Optional[bool or str] = True , ) -> None: super().__init__() __UpperCAmelCase : Optional[int] = config if in_channels % groups != 0: raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' ) if out_channels % groups != 0: raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' ) __UpperCAmelCase : str = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) __UpperCAmelCase : str = nn.Convad( 
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , kernel_size=__lowerCamelCase , stride=__lowerCamelCase , padding=__lowerCamelCase , groups=__lowerCamelCase , bias=__lowerCamelCase , padding_mode="zeros" , ) if use_normalization: __UpperCAmelCase : str = nn.BatchNormad( num_features=__lowerCamelCase , eps=config.layer_norm_eps , momentum=0.99_97 , affine=__lowerCamelCase , track_running_stats=__lowerCamelCase , ) else: __UpperCAmelCase : Any = None if use_activation: if isinstance(__lowerCamelCase , __lowerCamelCase ): __UpperCAmelCase : Tuple = ACTaFN[use_activation] elif isinstance(config.hidden_act , __lowerCamelCase ): __UpperCAmelCase : int = ACTaFN[config.hidden_act] else: __UpperCAmelCase : Union[str, Any] = config.hidden_act else: __UpperCAmelCase : List[Any] = None def _lowerCamelCase ( self: Tuple , __lowerCamelCase: torch.Tensor ) -> torch.Tensor: if self.config.tf_padding: __UpperCAmelCase : Tuple = apply_tf_padding(__lowerCamelCase , self.convolution ) __UpperCAmelCase : Any = self.convolution(__lowerCamelCase ) if self.normalization is not None: __UpperCAmelCase : Any = self.normalization(__lowerCamelCase ) if self.activation is not None: __UpperCAmelCase : List[Any] = self.activation(__lowerCamelCase ) return features class _snake_case ( _lowercase ): lowerCamelCase__: Optional[int] = MobileNetVaConfig lowerCamelCase__: List[str] = load_tf_weights_in_mobilenet_va lowerCamelCase__: List[Any] = "mobilenet_v1" lowerCamelCase__: Dict = "pixel_values" lowerCamelCase__: int = False def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[nn.Linear, nn.Convad] ) -> None: if isinstance(__lowerCamelCase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__lowerCamelCase , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) _snake_case = r''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' _snake_case = r''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, _lowercase , ) class _snake_case ( _lowercase ): def __init__( self: Optional[int] , __lowerCamelCase: MobileNetVaConfig , __lowerCamelCase: bool = True ) -> Union[str, Any]: super().__init__(__lowerCamelCase ) __UpperCAmelCase : int = config __UpperCAmelCase : Optional[Any] = 32 __UpperCAmelCase : List[str] = max(int(depth * config.depth_multiplier ) , config.min_depth ) __UpperCAmelCase : Dict = MobileNetVaConvLayer( __lowerCamelCase , in_channels=config.num_channels , out_channels=__lowerCamelCase , kernel_size=3 , stride=2 , ) __UpperCAmelCase : Optional[int] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __UpperCAmelCase : str = nn.ModuleList() for i in range(13 ): __UpperCAmelCase : List[str] = out_channels if strides[i] == 2 or i == 0: depth *= 2 __UpperCAmelCase : Optional[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __lowerCamelCase , in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , kernel_size=3 , stride=strides[i] , groups=__lowerCamelCase , ) ) self.layer.append( MobileNetVaConvLayer( __lowerCamelCase , in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , kernel_size=1 , ) ) __UpperCAmelCase : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Any ) -> int: raise NotImplementedError @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Optional[torch.Tensor] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: __UpperCAmelCase : Union[str, Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCAmelCase : int = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) __UpperCAmelCase : Optional[int] = self.conv_stem(__lowerCamelCase ) __UpperCAmelCase : Tuple = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): __UpperCAmelCase : List[str] = layer_module(__lowerCamelCase ) if output_hidden_states: __UpperCAmelCase : Any = all_hidden_states + (hidden_states,) __UpperCAmelCase : Any = hidden_states if self.pooler is not None: __UpperCAmelCase : Union[str, Any] = torch.flatten(self.pooler(__lowerCamelCase ) , start_dim=1 ) else: __UpperCAmelCase : List[Any] = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__lowerCamelCase , pooler_output=__lowerCamelCase , hidden_states=__lowerCamelCase , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , _lowercase , ) class _snake_case ( _lowercase ): def __init__( self: int , __lowerCamelCase: MobileNetVaConfig ) -> None: super().__init__(__lowerCamelCase ) __UpperCAmelCase : str = config.num_labels __UpperCAmelCase : Any = MobileNetVaModel(__lowerCamelCase ) __UpperCAmelCase : List[Any] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __UpperCAmelCase : Dict = nn.Dropout(config.classifier_dropout_prob , inplace=__lowerCamelCase ) __UpperCAmelCase : List[str] = nn.Linear(__lowerCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Optional[torch.Tensor] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[torch.Tensor] = None , __lowerCamelCase: Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: __UpperCAmelCase : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict __UpperCAmelCase : Dict = self.mobilenet_va(__lowerCamelCase , output_hidden_states=__lowerCamelCase , return_dict=__lowerCamelCase ) __UpperCAmelCase : str = outputs.pooler_output if return_dict else outputs[1] __UpperCAmelCase : Optional[Any] = self.classifier(self.dropout(__lowerCamelCase ) ) __UpperCAmelCase : int = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __UpperCAmelCase : Tuple = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __UpperCAmelCase : str = "single_label_classification" else: __UpperCAmelCase : Optional[Any] = "multi_label_classification" if self.config.problem_type == "regression": __UpperCAmelCase : Optional[int] = MSELoss() if self.num_labels == 1: __UpperCAmelCase : Optional[int] = loss_fct(logits.squeeze() , labels.squeeze() ) else: __UpperCAmelCase : Optional[Any] = loss_fct(__lowerCamelCase , __lowerCamelCase ) elif self.config.problem_type == "single_label_classification": __UpperCAmelCase : str = CrossEntropyLoss() __UpperCAmelCase : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __UpperCAmelCase : List[str] = BCEWithLogitsLoss() __UpperCAmelCase : List[str] = loss_fct(__lowerCamelCase , __lowerCamelCase ) if not return_dict: __UpperCAmelCase : Tuple = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__lowerCamelCase , logits=__lowerCamelCase , hidden_states=outputs.hidden_states , )
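# A minimal usage sketch for the classification model defined above. This
# assumes the standard transformers auto classes and downloads the documented
# checkpoint when run; the random pixel values are only a shape placeholder.
import torch
from transformers import MobileNetV1ForImageClassification

model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(pixel_values=pixel_values).logits
print(logits.shape, model.config.id2label[logits.argmax(-1).item()])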
def longest_distance(graph: dict) -> None:
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
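# Expected output for the sample graph above is 5: `long_dist` starts at 1, so
# distances count vertices, and the longest chain is 0 -> 2 -> 5 -> 6 -> 7.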
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _snake_case = { '''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''], '''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''], '''processing_whisper''': ['''WhisperProcessor'''], '''tokenization_whisper''': ['''WhisperTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['''WhisperTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''WhisperForConditionalGeneration''', '''WhisperModel''', '''WhisperPreTrainedModel''', '''WhisperForAudioClassification''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWhisperForConditionalGeneration''', '''TFWhisperModel''', '''TFWhisperPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''FlaxWhisperForConditionalGeneration''', '''FlaxWhisperModel''', '''FlaxWhisperPreTrainedModel''', '''FlaxWhisperForAudioClassification''', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
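# The guarded try/except blocks above register optional backends with a lazy
# module so that importing the package stays cheap. A generic, standalone
# sketch of the same deferred-import idea (not the transformers
# implementation itself):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._class_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, item: str):
        # import the defining submodule only when the symbol is first accessed
        submodule = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(submodule, item)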
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model.\n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
from __future__ import annotations

from math import pi


def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
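# Worked example for ind_reactance above: a 0.1 H inductor at 50 Hz has
# X_L = 2 * pi * f * L = 2 * pi * 50 * 0.1 ≈ 31.416 ohm, and zeroing a
# different argument solves for that quantity instead.
print(ind_reactance(0.1, 50, 0))  # {'reactance': 31.41592653589793}
print(ind_reactance(0, 50, 31.41592653589793))  # {'inductance': ~0.1}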
from __future__ import annotations

from random import choice


def random_pivot(lst):
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
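# Usage of kth_number above (k-th smallest). Note the partition drops values
# equal to the pivot, so the function assumes distinct elements.
print(kth_number([5, 3, 1, 4, 2], 1))  # 1
print(kth_number([5, 3, 1, 4, 2], 3))  # 3
print(kth_number([5, 3, 1, 4, 2], 5))  # 5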
import flax.linen as nn import jax import jax.numpy as jnp class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]: __UpperCAmelCase : List[str] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape __UpperCAmelCase : Dict = jax.image.resize( __lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , ) __UpperCAmelCase : Dict = self.conv(__lowerCamelCase ) return hidden_states class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: str ) -> Any: __UpperCAmelCase : Optional[int] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) __UpperCAmelCase : Any = self.conv(__lowerCamelCase ) return hidden_states class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: int = None lowerCamelCase__: float = 0.0 lowerCamelCase__: bool = None lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: str ) -> List[str]: __UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels __UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) __UpperCAmelCase : List[str] = nn.Conv( __lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype ) __UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) __UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob ) __UpperCAmelCase : Tuple = nn.Conv( __lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __UpperCAmelCase : List[Any] = None if use_nin_shortcut: __UpperCAmelCase : Dict = nn.Conv( __lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , ) def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]: __UpperCAmelCase : Dict = hidden_states __UpperCAmelCase : int = self.norma(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase ) __UpperCAmelCase : Tuple = self.conva(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) ) __UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 ) __UpperCAmelCase : List[str] = hidden_states + temb __UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase ) __UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase ) __UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : List[str] = self.conva(__lowerCamelCase ) if self.conv_shortcut is not None: __UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase ) return hidden_states + residual
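# The first block above doubles the spatial dimensions with jax.image.resize
# before its 3x3 convolution. A standalone check of just that resize step (no
# Flax parameters needed):
import jax
import jax.numpy as jnp

hidden_states = jnp.arange(4.0).reshape(1, 2, 2, 1)  # NHWC
batch, height, width, channels = hidden_states.shape
upsampled = jax.image.resize(
    hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest"
)
print(upsampled.shape)  # (1, 4, 4, 1)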
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
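# The predicate above accepts automorphic numbers, i.e. numbers whose square
# ends in the number itself: 5*5 = 25, 6*6 = 36, 76*76 = 5776, but 7*7 = 49.
for n in (5, 6, 25, 76, 625, 7):
    print(n, is_automorphic_number(n))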
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss _snake_case = pytest.mark.integration @require_faiss class _snake_case ( _lowercase ): def _lowerCamelCase ( self: Union[str, Any] ) -> str: __UpperCAmelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(__lowerCamelCase ) for x in np.arange(30 ).tolist()]} ) return dset def _lowerCamelCase ( self: Optional[Any] ) -> Tuple: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() __UpperCAmelCase : int = dset.map( lambda __lowerCamelCase , __lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase ) __UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT ) __UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) dset.drop_index("vecs" ) def _lowerCamelCase ( self: List[str] ) -> int: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def _lowerCamelCase ( self: Optional[int] ) -> Dict: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file: dset.save_faiss_index("vecs" , tmp_file.name ) dset.load_faiss_index("vecs2" , tmp_file.name ) os.unlink(tmp_file.name ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def _lowerCamelCase ( self: List[Any] ) -> List[Any]: __UpperCAmelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" ) dset.drop_index("vecs" ) self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) ) def _lowerCamelCase ( self: List[str] ) -> Dict: from elasticsearch import Elasticsearch __UpperCAmelCase : Dataset = self._create_dummy_dataset() with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: __UpperCAmelCase : int = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30 ) __UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} __UpperCAmelCase : Any = Elasticsearch() dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) @require_faiss class _snake_case ( _lowercase ): def _lowerCamelCase ( self: List[str] ) -> Optional[int]: import faiss __UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa ) __UpperCAmelCase : List[str] = 1 __UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase ) self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1] __UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase ) self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] ) __UpperCAmelCase : Dict = [scores[0] for scores in total_scores] __UpperCAmelCase : int = [indices[0] for indices in total_indices] self.assertGreater(np.min(__lowerCamelCase ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase ) def _lowerCamelCase ( self: Any ) -> List[str]: import faiss __UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(__lowerCamelCase ): __UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) ) def 
_lowerCamelCase ( self: List[str] ) -> Dict: import faiss __UpperCAmelCase : str = faiss.IndexFlat(5 ) __UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def _lowerCamelCase ( self: Union[str, Any] ) -> int: import faiss __UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file: index.save(tmp_file.name ) __UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa ) __UpperCAmelCase : Tuple = 1 __UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def _UpperCamelCase ( snake_case__ ) -> Optional[Any]: import faiss __UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5, dtype=np.floataa ) ) __UpperCAmelCase : Optional[Any] = "index.faiss" __UpperCAmelCase : Optional[int] = f'''mock://{index_name}''' index.save(snake_case__, storage_options=mockfs.storage_options ) __UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options ) __UpperCAmelCase : str = np.zeros(5, dtype=np.floataa ) __UpperCAmelCase : Any = 1 __UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _snake_case ( _lowercase ): def _lowerCamelCase ( self: str ) -> Union[str, Any]: from elasticsearch import Elasticsearch with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: __UpperCAmelCase : Optional[Any] = Elasticsearch() __UpperCAmelCase : Dict = {"acknowledged": True} __UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(["foo", "bar", "foobar"] ) # single query __UpperCAmelCase : Dict = "foo" __UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} __UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __UpperCAmelCase : int = "foo" __UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __UpperCAmelCase : int = ["foo", "bar", "foobar"] __UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} __UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase ) __UpperCAmelCase : Tuple = [scores[0] for scores in total_scores] __UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices] 
self.assertGreater(np.min(__lowerCamelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __lowerCamelCase ) # batched queries with timeout __UpperCAmelCase : str = ["foo", "bar", "foobar"] __UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 ) __UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores] __UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(__lowerCamelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __lowerCamelCase )
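# The FaissIndex wrapper exercised above ultimately drives a plain faiss
# index; a minimal raw-faiss equivalent of the inner-product search pattern
# used in these tests:
import faiss
import numpy as np

index = faiss.IndexFlatIP(5)  # inner-product metric, dimension 5
index.add(np.eye(5, dtype=np.float32))  # five one-hot database vectors
query = np.zeros((1, 5), dtype=np.float32)
query[0, 1] = 1.0
scores, indices = index.search(query, 1)
print(scores[0], indices[0])  # best match is row 1 with score 1.0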
import os


def solution() -> int:
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
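# A worked instance of the scoring above, taken from the problem statement:
# "COLIN" is worth 3 + 15 + 12 + 9 + 14 = 53, and at position 938 in the
# sorted list it contributes 938 * 53 = 49714.
print(sum(ord(letter) - 64 for letter in "COLIN"))  # 53
print(938 * sum(ord(letter) - 64 for letter in "COLIN"))  # 49714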
import argparse import struct import unittest class _snake_case : def __init__( self: Tuple , __lowerCamelCase: bytes ) -> None: __UpperCAmelCase : Tuple = data # Initialize hash values __UpperCAmelCase : Any = [ 0x6_A_0_9_E_6_6_7, 0xB_B_6_7_A_E_8_5, 0x3_C_6_E_F_3_7_2, 0xA_5_4_F_F_5_3_A, 0x5_1_0_E_5_2_7_F, 0x9_B_0_5_6_8_8_C, 0x1_F_8_3_D_9_A_B, 0x5_B_E_0_C_D_1_9, ] # Initialize round constants __UpperCAmelCase : Dict = [ 0x4_2_8_A_2_F_9_8, 0x7_1_3_7_4_4_9_1, 0xB_5_C_0_F_B_C_F, 0xE_9_B_5_D_B_A_5, 0x3_9_5_6_C_2_5_B, 0x5_9_F_1_1_1_F_1, 0x9_2_3_F_8_2_A_4, 0xA_B_1_C_5_E_D_5, 0xD_8_0_7_A_A_9_8, 0x1_2_8_3_5_B_0_1, 0x2_4_3_1_8_5_B_E, 0x5_5_0_C_7_D_C_3, 0x7_2_B_E_5_D_7_4, 0x8_0_D_E_B_1_F_E, 0x9_B_D_C_0_6_A_7, 0xC_1_9_B_F_1_7_4, 0xE_4_9_B_6_9_C_1, 0xE_F_B_E_4_7_8_6, 0x0_F_C_1_9_D_C_6, 0x2_4_0_C_A_1_C_C, 0x2_D_E_9_2_C_6_F, 0x4_A_7_4_8_4_A_A, 0x5_C_B_0_A_9_D_C, 0x7_6_F_9_8_8_D_A, 0x9_8_3_E_5_1_5_2, 0xA_8_3_1_C_6_6_D, 0xB_0_0_3_2_7_C_8, 0xB_F_5_9_7_F_C_7, 0xC_6_E_0_0_B_F_3, 0xD_5_A_7_9_1_4_7, 0x0_6_C_A_6_3_5_1, 0x1_4_2_9_2_9_6_7, 0x2_7_B_7_0_A_8_5, 0x2_E_1_B_2_1_3_8, 0x4_D_2_C_6_D_F_C, 0x5_3_3_8_0_D_1_3, 0x6_5_0_A_7_3_5_4, 0x7_6_6_A_0_A_B_B, 0x8_1_C_2_C_9_2_E, 0x9_2_7_2_2_C_8_5, 0xA_2_B_F_E_8_A_1, 0xA_8_1_A_6_6_4_B, 0xC_2_4_B_8_B_7_0, 0xC_7_6_C_5_1_A_3, 0xD_1_9_2_E_8_1_9, 0xD_6_9_9_0_6_2_4, 0xF_4_0_E_3_5_8_5, 0x1_0_6_A_A_0_7_0, 0x1_9_A_4_C_1_1_6, 0x1_E_3_7_6_C_0_8, 0x2_7_4_8_7_7_4_C, 0x3_4_B_0_B_C_B_5, 0x3_9_1_C_0_C_B_3, 0x4_E_D_8_A_A_4_A, 0x5_B_9_C_C_A_4_F, 0x6_8_2_E_6_F_F_3, 0x7_4_8_F_8_2_E_E, 0x7_8_A_5_6_3_6_F, 0x8_4_C_8_7_8_1_4, 0x8_C_C_7_0_2_0_8, 0x9_0_B_E_F_F_F_A, 0xA_4_5_0_6_C_E_B, 0xB_E_F_9_A_3_F_7, 0xC_6_7_1_7_8_F_2, ] __UpperCAmelCase : List[Any] = self.preprocessing(self.data ) self.final_hash() @staticmethod def _lowerCamelCase ( __lowerCamelCase: bytes ) -> bytes: __UpperCAmelCase : List[str] = B"\x80" + (B"\x00" * (63 - (len(__lowerCamelCase ) + 8) % 64)) __UpperCAmelCase : int = struct.pack(">Q" , (len(__lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def _lowerCamelCase ( self: Dict ) -> None: # Convert into blocks of 64 bytes __UpperCAmelCase : Dict = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __UpperCAmelCase : List[str] = list(struct.unpack(">16L" , __lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array __UpperCAmelCase : Union[str, Any] = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) __UpperCAmelCase : str = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) __UpperCAmelCase : Union[str, Any] = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_0_0_0_0_0_0_0_0 # Compression __UpperCAmelCase : Union[str, Any] = self.ror(__lowerCamelCase , 6 ) ^ self.ror(__lowerCamelCase , 11 ) ^ self.ror(__lowerCamelCase , 25 ) __UpperCAmelCase : Tuple = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g) __UpperCAmelCase : int = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_0_0_0_0_0_0_0_0 __UpperCAmelCase : List[Any] = self.ror(__lowerCamelCase , 2 ) ^ self.ror(__lowerCamelCase , 13 ) ^ self.ror(__lowerCamelCase , 22 ) __UpperCAmelCase : Dict = (a & b) ^ (a & c) 
^ (b & c) __UpperCAmelCase : int = (sa + maj) % 0x1_0_0_0_0_0_0_0_0 __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = ( g, f, e, ((d + tempa) % 0x1_0_0_0_0_0_0_0_0), c, b, a, ((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0), ) __UpperCAmelCase : Optional[int] = [a, b, c, d, e, f, g, h] # Modify final values __UpperCAmelCase : List[str] = [ ((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0) for index, element in enumerate(self.hashes ) ] __UpperCAmelCase : int = "".join([hex(__lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> int: return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations) class _snake_case ( unittest.TestCase ): def _lowerCamelCase ( self: List[Any] ) -> None: import hashlib __UpperCAmelCase : Dict = bytes("Test String" , "utf-8" ) self.assertEqual(SHAaaa(__lowerCamelCase ).hash , hashlib.shaaaa(__lowerCamelCase ).hexdigest() ) def _UpperCamelCase ( ) -> None: import doctest doctest.testmod() __UpperCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument( "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", ) parser.add_argument( "-f", "--file", dest="input_file", help="Hash contents of a file" ) __UpperCAmelCase : List[Any] = parser.parse_args() __UpperCAmelCase : Optional[int] = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file, "rb" ) as f: __UpperCAmelCase : List[str] = f.read() else: __UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" ) print(SHAaaa(snake_case__ ).hash ) if __name__ == "__main__": main()
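# Parity check of the class above against hashlib on the classic "abc" test
# vector; both should print
# ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad.
import hashlib

print(SHAaaa(b"abc").hash)
print(hashlib.sha256(b"abc").hexdigest())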
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''facebook/data2vec-vision-base-ft''': ( '''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json''' ), } class _snake_case ( _lowercase ): lowerCamelCase__: Union[str, Any] = "data2vec-vision" def __init__( self: List[str] , __lowerCamelCase: str=7_68 , __lowerCamelCase: Optional[Any]=12 , __lowerCamelCase: Optional[Any]=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: int="gelu" , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: str=0.0 , __lowerCamelCase: List[str]=0.02 , __lowerCamelCase: Optional[Any]=1e-12 , __lowerCamelCase: Any=2_24 , __lowerCamelCase: Optional[Any]=16 , __lowerCamelCase: Optional[Any]=3 , __lowerCamelCase: int=False , __lowerCamelCase: Any=False , __lowerCamelCase: Union[str, Any]=False , __lowerCamelCase: List[str]=False , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=True , __lowerCamelCase: str=[3, 5, 7, 11] , __lowerCamelCase: Optional[int]=[1, 2, 3, 6] , __lowerCamelCase: List[str]=True , __lowerCamelCase: List[Any]=0.4 , __lowerCamelCase: Dict=2_56 , __lowerCamelCase: Any=1 , __lowerCamelCase: Optional[Any]=False , __lowerCamelCase: Dict=2_55 , **__lowerCamelCase: List[Any] , ) -> str: super().__init__(**__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = hidden_size __UpperCAmelCase : Tuple = num_hidden_layers __UpperCAmelCase : int = num_attention_heads __UpperCAmelCase : Tuple = intermediate_size __UpperCAmelCase : str = hidden_act __UpperCAmelCase : Tuple = hidden_dropout_prob __UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob __UpperCAmelCase : List[str] = initializer_range __UpperCAmelCase : int = layer_norm_eps __UpperCAmelCase : Dict = image_size __UpperCAmelCase : Any = patch_size __UpperCAmelCase : Optional[int] = num_channels __UpperCAmelCase : List[str] = use_mask_token __UpperCAmelCase : Tuple = use_absolute_position_embeddings __UpperCAmelCase : Optional[int] = use_relative_position_bias __UpperCAmelCase : str = use_shared_relative_position_bias __UpperCAmelCase : Tuple = layer_scale_init_value __UpperCAmelCase : str = drop_path_rate __UpperCAmelCase : Optional[Any] = use_mean_pooling # decode head attributes (semantic segmentation) __UpperCAmelCase : Optional[Any] = out_indices __UpperCAmelCase : List[Any] = pool_scales # auxiliary head attributes (semantic segmentation) __UpperCAmelCase : Tuple = use_auxiliary_head __UpperCAmelCase : List[str] = auxiliary_loss_weight __UpperCAmelCase : Optional[int] = auxiliary_channels __UpperCAmelCase : List[str] = auxiliary_num_convs __UpperCAmelCase : Tuple = auxiliary_concat_input __UpperCAmelCase : List[str] = semantic_loss_ignore_index class _snake_case ( _lowercase ): lowerCamelCase__: List[Any] = version.parse("1.11" ) @property def _lowerCamelCase ( self: Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _lowerCamelCase ( self: Dict ) -> float: return 1e-4
import numpy as np import datasets _snake_case = ''' Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] ''' _snake_case = '''\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } ''' _snake_case = ''' Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric("mahalanobis") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {\'mahalanobis\': array([0.5])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _snake_case ( datasets.Metric ): def _lowerCamelCase ( self: List[str] ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]: # convert to numpy arrays __UpperCAmelCase : int = np.array(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction __UpperCAmelCase : str = X - np.mean(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = np.cov(reference_distribution.T ) try: __UpperCAmelCase : int = np.linalg.inv(__lowerCamelCase ) except np.linalg.LinAlgError: __UpperCAmelCase : Optional[int] = np.linalg.pinv(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = np.dot(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
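# The metric above computes d^2(x) = (x - mu) S^+ (x - mu)^T with the
# (pseudo-)inverse covariance of the reference set. The same computation in
# plain NumPy, reproducing the docstring example:
import numpy as np

X = np.array([[0, 1]], dtype=float)
reference = np.array([[0, 1], [1, 0]], dtype=float)

delta = X - reference.mean()
cov_inv = np.linalg.pinv(np.cov(reference.T))  # pinv handles singular covariance
mahal = np.einsum("ij,jk,ik->i", delta, cov_inv, delta)
print(mahal)  # [0.5]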
342
1
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _snake_case = logging.get_logger(__name__) # General docstring _snake_case = '''PoolFormerConfig''' # Base docstring _snake_case = '''sail/poolformer_s12''' _snake_case = [1, 512, 7, 7] # Image classification docstring _snake_case = '''sail/poolformer_s12''' _snake_case = '''tabby, tabby cat''' _snake_case = [ '''sail/poolformer_s12''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def _UpperCamelCase ( snake_case__, snake_case__ = 0.0, snake_case__ = False ) -> Any: if drop_prob == 0.0 or not training: return input __UpperCAmelCase : List[str] = 1 - drop_prob __UpperCAmelCase : int = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets __UpperCAmelCase : Dict = keep_prob + torch.rand(snake_case__, dtype=input.dtype, device=input.device ) random_tensor.floor_() # binarize __UpperCAmelCase : Dict = input.div(snake_case__ ) * random_tensor return output class _snake_case ( nn.Module ): def __init__( self: Any , __lowerCamelCase: Optional[float] = None ) -> None: super().__init__() __UpperCAmelCase : List[Any] = drop_prob def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: torch.Tensor ) -> torch.Tensor: return drop_path(__lowerCamelCase , self.drop_prob , self.training ) def _lowerCamelCase ( self: List[Any] ) -> str: return "p={}".format(self.drop_prob ) class _snake_case ( nn.Module ): def __init__( self: Optional[Any] , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[Any]=None ) -> Any: super().__init__() __UpperCAmelCase : Optional[int] = patch_size if isinstance(__lowerCamelCase , collections.abc.Iterable ) else (patch_size, patch_size) __UpperCAmelCase : List[str] = stride if isinstance(__lowerCamelCase , collections.abc.Iterable ) else (stride, stride) __UpperCAmelCase : Union[str, Any] = padding if isinstance(__lowerCamelCase , collections.abc.Iterable ) else (padding, padding) __UpperCAmelCase : Any = nn.Convad(__lowerCamelCase , __lowerCamelCase , kernel_size=__lowerCamelCase , stride=__lowerCamelCase , padding=__lowerCamelCase ) __UpperCAmelCase : Dict = norm_layer(__lowerCamelCase ) if norm_layer else nn.Identity() def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: List[Any] ) -> Optional[int]: __UpperCAmelCase : Optional[Any] = self.projection(__lowerCamelCase ) __UpperCAmelCase : Tuple = self.norm(__lowerCamelCase ) return embeddings class _snake_case ( nn.GroupNorm ): def __init__( self: Dict , __lowerCamelCase: Any , **__lowerCamelCase: Optional[Any] ) -> Union[str, Any]: super().__init__(1 , __lowerCamelCase , **__lowerCamelCase ) class _snake_case ( nn.Module ): def __init__( self: List[Any] , __lowerCamelCase: Dict ) -> List[Any]: super().__init__() __UpperCAmelCase : List[str] = nn.AvgPoolad(__lowerCamelCase , stride=1 , padding=pool_size // 2 , count_include_pad=__lowerCamelCase ) def _lowerCamelCase ( self: 
Tuple , __lowerCamelCase: List[str] ) -> Dict: return self.pool(__lowerCamelCase ) - hidden_states class _snake_case ( nn.Module ): def __init__( self: Dict , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Tuple ) -> Dict: super().__init__() __UpperCAmelCase : Dict = nn.Convad(__lowerCamelCase , __lowerCamelCase , 1 ) __UpperCAmelCase : Any = nn.Convad(__lowerCamelCase , __lowerCamelCase , 1 ) __UpperCAmelCase : List[str] = PoolFormerDropPath(__lowerCamelCase ) if isinstance(config.hidden_act , __lowerCamelCase ): __UpperCAmelCase : List[Any] = ACTaFN[config.hidden_act] else: __UpperCAmelCase : Optional[int] = config.hidden_act def _lowerCamelCase ( self: Any , __lowerCamelCase: Optional[int] ) -> Tuple: __UpperCAmelCase : Optional[int] = self.conva(__lowerCamelCase ) __UpperCAmelCase : Tuple = self.act_fn(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.drop(__lowerCamelCase ) __UpperCAmelCase : List[str] = self.conva(__lowerCamelCase ) __UpperCAmelCase : Any = self.drop(__lowerCamelCase ) return hidden_states class _snake_case ( nn.Module ): def __init__( self: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: str ) -> Dict: super().__init__() __UpperCAmelCase : Optional[Any] = PoolFormerPooling(__lowerCamelCase ) __UpperCAmelCase : Any = PoolFormerOutput(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Tuple = PoolFormerGroupNorm(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = PoolFormerGroupNorm(__lowerCamelCase ) # Useful for training neural nets __UpperCAmelCase : Tuple = PoolFormerDropPath(__lowerCamelCase ) if drop_path > 0.0 else nn.Identity() __UpperCAmelCase : List[str] = config.use_layer_scale if config.use_layer_scale: __UpperCAmelCase : Optional[int] = nn.Parameter( config.layer_scale_init_value * torch.ones((__lowerCamelCase) ) , requires_grad=__lowerCamelCase ) __UpperCAmelCase : Dict = nn.Parameter( config.layer_scale_init_value * torch.ones((__lowerCamelCase) ) , requires_grad=__lowerCamelCase ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[Any] ) -> str: if self.use_layer_scale: __UpperCAmelCase : List[Any] = self.pooling(self.before_norm(__lowerCamelCase ) ) __UpperCAmelCase : Optional[Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection __UpperCAmelCase : Tuple = hidden_states + self.drop_path(__lowerCamelCase ) __UpperCAmelCase : Tuple = () __UpperCAmelCase : Any = self.output(self.after_norm(__lowerCamelCase ) ) __UpperCAmelCase : List[str] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection __UpperCAmelCase : Optional[Any] = hidden_states + self.drop_path(__lowerCamelCase ) __UpperCAmelCase : List[str] = (output,) + outputs return outputs else: __UpperCAmelCase : Tuple = self.drop_path(self.pooling(self.before_norm(__lowerCamelCase ) ) ) # First residual connection __UpperCAmelCase : Optional[Any] = pooling_output + hidden_states __UpperCAmelCase : Any = () # Second residual connection inside the PoolFormerOutput block __UpperCAmelCase : Any = self.drop_path(self.output(self.after_norm(__lowerCamelCase ) ) ) __UpperCAmelCase : Any = hidden_states + layer_output __UpperCAmelCase : Dict = (output,) + outputs return outputs class _snake_case ( nn.Module ): def __init__( self: str , __lowerCamelCase: Any ) -> 
Optional[int]: super().__init__() __UpperCAmelCase : Tuple = config # stochastic depth decay rule __UpperCAmelCase : Optional[Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings __UpperCAmelCase : Dict = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) __UpperCAmelCase : Dict = nn.ModuleList(__lowerCamelCase ) # Transformer blocks __UpperCAmelCase : Tuple = [] __UpperCAmelCase : Tuple = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers __UpperCAmelCase : int = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __lowerCamelCase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__lowerCamelCase ) ) __UpperCAmelCase : Tuple = nn.ModuleList(__lowerCamelCase ) def _lowerCamelCase ( self: int , __lowerCamelCase: Dict , __lowerCamelCase: List[str]=False , __lowerCamelCase: int=True ) -> Optional[int]: __UpperCAmelCase : Dict = () if output_hidden_states else None __UpperCAmelCase : List[Any] = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): __UpperCAmelCase , __UpperCAmelCase : Dict = layers # Get patch embeddings from hidden_states __UpperCAmelCase : List[str] = embedding_layer(__lowerCamelCase ) # Send the embeddings through the blocks for _, blk in enumerate(__lowerCamelCase ): __UpperCAmelCase : Union[str, Any] = blk(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = layer_outputs[0] if output_hidden_states: __UpperCAmelCase : int = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__lowerCamelCase , hidden_states=__lowerCamelCase ) class _snake_case ( _lowercase ): lowerCamelCase__: str = PoolFormerConfig lowerCamelCase__: List[Any] = "poolformer" lowerCamelCase__: Optional[Any] = "pixel_values" lowerCamelCase__: int = True def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] ) -> Union[str, Any]: if isinstance(__lowerCamelCase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__lowerCamelCase , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: str , __lowerCamelCase: Optional[int]=False ) -> Optional[Any]: if isinstance(__lowerCamelCase , __lowerCamelCase ): __UpperCAmelCase : str = value _snake_case = r''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
''' _snake_case = r''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. ''' @add_start_docstrings( "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , _lowercase , ) class _snake_case ( _lowercase ): def __init__( self: Optional[Any] , __lowerCamelCase: Dict ) -> int: super().__init__(__lowerCamelCase ) __UpperCAmelCase : Tuple = config __UpperCAmelCase : Optional[int] = PoolFormerEncoder(__lowerCamelCase ) # Initialize weights and apply final processing self.post_init() def _lowerCamelCase ( self: str ) -> Any: return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Optional[torch.FloatTensor] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]: __UpperCAmelCase : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) __UpperCAmelCase : str = self.encoder( __lowerCamelCase , output_hidden_states=__lowerCamelCase , return_dict=__lowerCamelCase , ) __UpperCAmelCase : Optional[Any] = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__lowerCamelCase , hidden_states=encoder_outputs.hidden_states , ) class _snake_case ( nn.Module ): def __init__( self: str , __lowerCamelCase: Dict ) -> List[Any]: super().__init__() __UpperCAmelCase : List[str] = nn.Linear(config.hidden_size , config.hidden_size ) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Optional[Any] ) -> Optional[Any]: __UpperCAmelCase : str = self.dense(__lowerCamelCase ) return output @add_start_docstrings( "\n PoolFormer Model transformer with an image classification head on top\n " , _lowercase , ) class _snake_case ( _lowercase ): def __init__( self: Dict , __lowerCamelCase: Tuple ) -> Any: super().__init__(__lowerCamelCase ) __UpperCAmelCase : int = config.num_labels __UpperCAmelCase : Any = PoolFormerModel(__lowerCamelCase ) # Final norm __UpperCAmelCase : Optional[int] = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head __UpperCAmelCase : int = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Optional[torch.FloatTensor] = None , __lowerCamelCase: Optional[torch.LongTensor] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: __UpperCAmelCase : Optional[int] = return_dict if 
return_dict is not None else self.config.use_return_dict __UpperCAmelCase : List[Any] = self.poolformer( __lowerCamelCase , output_hidden_states=__lowerCamelCase , return_dict=__lowerCamelCase , ) __UpperCAmelCase : Optional[Any] = outputs[0] __UpperCAmelCase : Dict = self.classifier(self.norm(__lowerCamelCase ).mean([-2, -1] ) ) __UpperCAmelCase : Tuple = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __UpperCAmelCase : Optional[int] = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __UpperCAmelCase : str = "single_label_classification" else: __UpperCAmelCase : Union[str, Any] = "multi_label_classification" if self.config.problem_type == "regression": __UpperCAmelCase : str = MSELoss() if self.num_labels == 1: __UpperCAmelCase : str = loss_fct(logits.squeeze() , labels.squeeze() ) else: __UpperCAmelCase : List[Any] = loss_fct(__lowerCamelCase , __lowerCamelCase ) elif self.config.problem_type == "single_label_classification": __UpperCAmelCase : Optional[Any] = CrossEntropyLoss() __UpperCAmelCase : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __UpperCAmelCase : int = BCEWithLogitsLoss() __UpperCAmelCase : Tuple = loss_fct(__lowerCamelCase , __lowerCamelCase ) if not return_dict: __UpperCAmelCase : int = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__lowerCamelCase , logits=__lowerCamelCase , hidden_states=outputs.hidden_states )
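# A standalone sketch of the stochastic-depth helper defined at the top of this file:
# each sample in the batch survives with probability 1 - drop_prob, and survivors are
# rescaled by 1 / (1 - drop_prob) so the expected activation is unchanged.
import torch

def drop_path_demo(x: torch.Tensor, drop_prob: float = 0.2, training: bool = True) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # broadcast the mask over all non-batch dims
    mask = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    mask.floor_()  # binarize: 1 with probability keep_prob, else 0
    return x.div(keep_prob) * mask

print(drop_path_demo(torch.ones(4, 3, 2, 2)).shape)  # torch.Size([4, 3, 2, 2])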
342
import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class _snake_case ( unittest.TestCase ): def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str: __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : Optional[int] = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : List[str] = use_attention_mask __UpperCAmelCase : Dict = use_token_type_ids __UpperCAmelCase : Optional[int] = use_labels __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Dict = num_hidden_layers __UpperCAmelCase : Dict = num_attention_heads __UpperCAmelCase : Tuple = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_act __UpperCAmelCase : Tuple = hidden_dropout_prob __UpperCAmelCase : str = attention_probs_dropout_prob __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : Optional[int] = type_vocab_size __UpperCAmelCase : str = type_sequence_label_size __UpperCAmelCase : Tuple = initializer_range __UpperCAmelCase : str = num_choices def _lowerCamelCase ( self: Optional[Any] ) -> List[str]: __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : str = None if self.use_attention_mask: __UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Any = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , ) return config, input_ids, attention_mask def _lowerCamelCase ( self: str ) -> Any: __UpperCAmelCase : List[str] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs __UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class _snake_case ( _lowercase , unittest.TestCase ): lowerCamelCase__: str = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, 
FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, ) if is_flax_available() else () ) def _lowerCamelCase ( self: List[Any] ) -> Dict: __UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self ) @slow def _lowerCamelCase ( self: Tuple ) -> Optional[Any]: for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" ) __UpperCAmelCase : Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCamelCase ) @require_flax class _snake_case ( unittest.TestCase ): @slow def _lowerCamelCase ( self: int ) -> List[Any]: __UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" ) __UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) __UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0] __UpperCAmelCase : str = (1, 11, 7_68) self.assertEqual(output.shape , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
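# A sketch mirroring the integration test above: load the pretrained Flax checkpoint
# and check the hidden-state shape (assumes flax is installed and network access works).
import numpy as np
from transformers import FlaxDistilBertModel

model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
output = model(input_ids)[0]
print(output.shape)  # (1, 11, 768)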
342
1
import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification _snake_case = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co _snake_case = '''main''' # Default branch name _snake_case = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2''' # One particular commit (not the top of `main`) _snake_case = '''aaaaaaa''' # This commit does not exist, so we should 404. _snake_case = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684''' # Sha-1 of config.json on the top of `main`, for checking purposes _snake_case = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3''' @contextlib.contextmanager def _UpperCamelCase ( ) -> Union[str, Any]: print("Welcome!" ) yield print("Bye!" ) @contextlib.contextmanager def _UpperCamelCase ( ) -> List[str]: print("Bonjour!" ) yield print("Au revoir!" ) class _snake_case ( unittest.TestCase ): def _lowerCamelCase ( self: Union[str, Any] ) -> str: # If the spec is missing, importlib would not be able to import the module dynamically. assert transformers.__spec__ is not None assert importlib.util.find_spec("transformers" ) is not None class _snake_case ( unittest.TestCase ): @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: List[str] ) -> Tuple: with ContextManagers([] ): print("Transformers are awesome!" ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" ) @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Union[str, Any] ) -> Optional[Any]: with ContextManagers([context_en()] ): print("Transformers are awesome!" ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" ) @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[Any] ) -> Union[str, Any]: with ContextManagers([context_fr(), context_en()] ): print("Transformers are awesome!" 
) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" ) @require_torch def _lowerCamelCase ( self: List[str] ) -> Optional[int]: self.assertEqual(find_labels(__lowerCamelCase ) , ["labels"] ) self.assertEqual(find_labels(__lowerCamelCase ) , ["labels", "next_sentence_label"] ) self.assertEqual(find_labels(__lowerCamelCase ) , ["start_positions", "end_positions"] ) class _snake_case ( _lowercase ): pass self.assertEqual(find_labels(__lowerCamelCase ) , ["labels"] ) @require_tf def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]: self.assertEqual(find_labels(__lowerCamelCase ) , ["labels"] ) self.assertEqual(find_labels(__lowerCamelCase ) , ["labels", "next_sentence_label"] ) self.assertEqual(find_labels(__lowerCamelCase ) , ["start_positions", "end_positions"] ) class _snake_case ( _lowercase ): pass self.assertEqual(find_labels(__lowerCamelCase ) , ["labels"] ) @require_flax def _lowerCamelCase ( self: Dict ) -> int: # Flax models don't have labels self.assertEqual(find_labels(__lowerCamelCase ) , [] ) self.assertEqual(find_labels(__lowerCamelCase ) , [] ) self.assertEqual(find_labels(__lowerCamelCase ) , [] ) class _snake_case ( _lowercase ): pass self.assertEqual(find_labels(__lowerCamelCase ) , [] )
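# A self-contained illustration of `ContextManagers`, which enters the given context
# managers in order and exits them in reverse; the two helpers are re-stated here under
# their pre-obfuscation names (the file above defines them as `_UpperCamelCase`).
import contextlib
from transformers.utils import ContextManagers

@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")

@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")

with ContextManagers([context_fr(), context_en()]):
    print("Transformers are awesome!")
# prints: Bonjour! / Welcome! / Transformers are awesome! / Bye! / Au revoir!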
342
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration _snake_case = [ # tf -> hf ('''/''', '''.'''), ('''layer_''', '''layers.'''), ('''kernel''', '''weight'''), ('''beta''', '''bias'''), ('''gamma''', '''weight'''), ('''pegasus''', '''model'''), ] _snake_case = [ ('''.output.dense''', '''.fc2'''), ('''intermediate.LayerNorm''', '''final_layer_norm'''), ('''intermediate.dense''', '''fc1'''), ] _snake_case = ( INIT_COMMON + [ ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.out_proj'''), ('''attention.self''', '''self_attn'''), ('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''), ('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''), ('''attention.encdec''', '''encoder_attn'''), ('''key''', '''k_proj'''), ('''value''', '''v_proj'''), ('''query''', '''q_proj'''), ('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''), ] + END_COMMON ) _snake_case = ( INIT_COMMON + [ ('''embeddings.word_embeddings''', '''shared.weight'''), ('''embeddings.position_embeddings''', '''embed_positions.weight'''), ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.output'''), ('''attention.self''', '''self_attn.self'''), ('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''), ] + END_COMMON ) _snake_case = [ '''encdec/key/bias''', '''encdec/query/bias''', '''encdec/value/bias''', '''self/key/bias''', '''self/query/bias''', '''self/value/bias''', '''encdec_output/dense/bias''', '''attention/output/dense/bias''', ] def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any: for tf_name, hf_name in patterns: __UpperCAmelCase : Optional[int] = k.replace(snake_case__, snake_case__ ) return k def _UpperCamelCase ( snake_case__, snake_case__ ) -> BigBirdPegasusForConditionalGeneration: __UpperCAmelCase : Dict = BigBirdPegasusConfig(**snake_case__ ) __UpperCAmelCase : Dict = BigBirdPegasusForConditionalGeneration(snake_case__ ) __UpperCAmelCase : Optional[Any] = torch_model.state_dict() __UpperCAmelCase : Optional[int] = {} # separating decoder weights __UpperCAmelCase : List[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )} __UpperCAmelCase : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )} for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ): __UpperCAmelCase : Optional[int] = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE] if any(snake_case__ ): continue __UpperCAmelCase : List[str] = DECODER_PATTERNS __UpperCAmelCase : str = rename_state_dict_key(snake_case__, snake_case__ ) if new_k not in state_dict: raise ValueError(f'''could not find new key {new_k} in state dict. 
(converted from {k})''' ) if any(True if i in k else False for i in ["dense", "query", "key", "value"] ): __UpperCAmelCase : Optional[int] = v.T __UpperCAmelCase : str = torch.from_numpy(snake_case__ ) assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ): __UpperCAmelCase : int = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE] if any(snake_case__ ): continue __UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS __UpperCAmelCase : Optional[int] = rename_state_dict_key(snake_case__, snake_case__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' ) if any(True if i in k else False for i in ["dense", "query", "key", "value"] ): __UpperCAmelCase : List[Any] = v.T __UpperCAmelCase : List[str] = torch.from_numpy(snake_case__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' __UpperCAmelCase : List[Any] = mapping["model.embed_positions.weight"] __UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" ) __UpperCAmelCase , __UpperCAmelCase : Any = torch_model.load_state_dict(snake_case__, strict=snake_case__ ) __UpperCAmelCase : str = [ k for k in missing if k not in [ "final_logits_bias", "model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight", "lm_head.weight", ] ] assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], f'''no matches found for the following tf keys {extra}''' return torch_model def _UpperCamelCase ( snake_case__ ) -> Dict: __UpperCAmelCase : Tuple = tf.train.list_variables(snake_case__ ) __UpperCAmelCase : List[str] = {} __UpperCAmelCase : str = ["global_step"] for name, shape in tqdm(snake_case__, desc="converting tf checkpoint to dict" ): __UpperCAmelCase : Tuple = any(pat in name for pat in ignore_name ) if skip_key: continue __UpperCAmelCase : Optional[Any] = tf.train.load_variable(snake_case__, snake_case__ ) __UpperCAmelCase : Tuple = array return tf_weights def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict: __UpperCAmelCase : str = get_tf_weights_as_numpy(snake_case__ ) __UpperCAmelCase : List[Any] = convert_bigbird_pegasus(snake_case__, snake_case__ ) torch_model.save_pretrained(snake_case__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''') parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''') _snake_case = parser.parse_args() _snake_case = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
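# A hypothetical command-line invocation of the conversion script above; both paths are
# placeholders, not real checkpoints:
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus_tf_ckpt \
#       --save_dir ./bigbird-pegasus-converted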
342
1
import flax.linen as nn import jax import jax.numpy as jnp class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]: __UpperCAmelCase : List[str] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape __UpperCAmelCase : Dict = jax.image.resize( __lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , ) __UpperCAmelCase : Dict = self.conv(__lowerCamelCase ) return hidden_states class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: str ) -> Any: __UpperCAmelCase : Optional[int] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) __UpperCAmelCase : Any = self.conv(__lowerCamelCase ) return hidden_states class _snake_case ( nn.Module ): lowerCamelCase__: int lowerCamelCase__: int = None lowerCamelCase__: float = 0.0 lowerCamelCase__: bool = None lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: str ) -> List[str]: __UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels __UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) __UpperCAmelCase : List[str] = nn.Conv( __lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype ) __UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) __UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob ) __UpperCAmelCase : Tuple = nn.Conv( __lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __UpperCAmelCase : List[Any] = None if use_nin_shortcut: __UpperCAmelCase : Dict = nn.Conv( __lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , ) def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]: __UpperCAmelCase : Dict = hidden_states __UpperCAmelCase : int = self.norma(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase ) __UpperCAmelCase : Tuple = self.conva(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) ) __UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 ) __UpperCAmelCase : List[str] = hidden_states + temb __UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase ) __UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase ) __UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : List[str] = self.conva(__lowerCamelCase ) if self.conv_shortcut is not None: __UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase ) return hidden_states + residual
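# A small illustration of the nearest-neighbor upsampling used by the first module
# above: the spatial dimensions are doubled before the 3x3 convolution is applied.
import jax
import jax.numpy as jnp

hidden_states = jnp.arange(4.0).reshape(1, 2, 2, 1)  # (batch, height, width, channels)
upsampled = jax.image.resize(hidden_states, shape=(1, 4, 4, 1), method="nearest")
print(upsampled.shape)  # (1, 4, 4, 1)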
342
import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class _snake_case ( _lowercase ): lowerCamelCase__: Any = ["image_processor", "tokenizer"] lowerCamelCase__: Optional[Any] = "BlipImageProcessor" lowerCamelCase__: Optional[int] = "AutoTokenizer" def __init__( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict: super().__init__(__lowerCamelCase , __lowerCamelCase ) # add QFormer tokenizer __UpperCAmelCase : Dict = qformer_tokenizer def __call__( self: Any , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Dict , ) -> BatchFeature: if images is None and text is None: raise ValueError("You have to specify at least images or text." ) __UpperCAmelCase : str = BatchFeature() if text is not None: __UpperCAmelCase : Any = self.tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) encoding.update(__lowerCamelCase ) __UpperCAmelCase : Dict = self.qformer_tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) __UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" ) __UpperCAmelCase : Optional[int] = qformer_text_encoding.pop("attention_mask" ) if images is not None: __UpperCAmelCase : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase ) encoding.update(__lowerCamelCase ) return encoding def _lowerCamelCase ( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Optional[Any]: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Any , 
**__lowerCamelCase: Dict ) -> Tuple: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def _lowerCamelCase ( self: List[str] ) -> Tuple: __UpperCAmelCase : str = self.tokenizer.model_input_names __UpperCAmelCase : Dict = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> str: if os.path.isfile(__lowerCamelCase ): raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) __UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , "qformer_tokenizer" ) self.qformer_tokenizer.save_pretrained(__lowerCamelCase ) return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase ) @classmethod def _lowerCamelCase ( cls: Tuple , __lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]: __UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" ) __UpperCAmelCase : List[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase ) args.append(__lowerCamelCase ) return cls(*__lowerCamelCase )
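# A usage sketch, assuming the class above is the upstream `InstructBlipProcessor`;
# the checkpoint name is illustrative and the image is a dummy placeholder.
import numpy as np
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(images=image, text="What is shown in the image?", return_tensors="pt")
print(sorted(inputs.keys()))  # includes input_ids, pixel_values and qformer_input_ids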
342
1
from __future__ import annotations from scipy.special import comb # type: ignore class _snake_case : def __init__( self: str , __lowerCamelCase: list[tuple[float, float]] ) -> Optional[int]: __UpperCAmelCase : int = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. __UpperCAmelCase : Dict = len(__lowerCamelCase ) - 1 def _lowerCamelCase ( self: List[str] , __lowerCamelCase: float ) -> list[float]: assert 0 <= t <= 1, "Time t must be between 0 and 1." __UpperCAmelCase : list[float] = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , __lowerCamelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(__lowerCamelCase ) , 5 ) == 1 return output_values def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: float ) -> tuple[float, float]: assert 0 <= t <= 1, "Time t must be between 0 and 1." __UpperCAmelCase : Optional[int] = self.basis_function(__lowerCamelCase ) __UpperCAmelCase : Tuple = 0.0 __UpperCAmelCase : Dict = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: float = 0.01 ) -> Optional[Any]: from matplotlib import pyplot as plt # type: ignore __UpperCAmelCase : list[float] = [] # x coordinates of points to plot __UpperCAmelCase : list[float] = [] # y coordinates of points to plot __UpperCAmelCase : int = 0.0 while t <= 1: __UpperCAmelCase : Optional[Any] = self.bezier_curve_function(__lowerCamelCase ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size __UpperCAmelCase : str = [i[0] for i in self.list_of_points] __UpperCAmelCase : str = [i[1] for i in self.list_of_points] plt.plot( __lowerCamelCase , __lowerCamelCase , color="blue" , label="Curve of Degree " + str(self.degree ) , ) plt.scatter(__lowerCamelCase , __lowerCamelCase , color="red" , label="Control Points" ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
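# A quick standalone check of the degree-1 Bernstein basis used above: at t = 0.5 both
# weights comb(1, i) * (1 - t)**(1 - i) * t**i equal 0.5, so they sum to 1 and the
# evaluated point is the midpoint of the two control points.
from scipy.special import comb

t, degree = 0.5, 1
points = [(1, 2), (3, 5)]
basis = [comb(degree, i) * ((1 - t) ** (degree - i)) * (t**i) for i in range(degree + 1)]
x = sum(b * p[0] for b, p in zip(basis, points))
y = sum(b * p[1] for b, p in zip(basis, points))
print(basis, (x, y))  # [0.5, 0.5] (2.0, 3.5)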
342
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _snake_case = logging.get_logger(__name__) _snake_case = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } _snake_case = { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } _snake_case = {'''facebook/blenderbot-3B''': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _UpperCamelCase ( ) -> Dict: __UpperCAmelCase : Tuple = ( list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) ) ) __UpperCAmelCase : str = bs[:] __UpperCAmelCase : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case__ ) cs.append(2**8 + n ) n += 1 __UpperCAmelCase : Optional[Any] = [chr(snake_case__ ) for n in cs] return dict(zip(snake_case__, snake_case__ ) ) def _UpperCamelCase ( snake_case__ ) -> Any: __UpperCAmelCase : List[Any] = set() __UpperCAmelCase : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase : Union[str, Any] = char return pairs class _snake_case ( _lowercase ): lowerCamelCase__: str = VOCAB_FILES_NAMES lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__: Dict = ["input_ids", "attention_mask"] def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]: __UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token __UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token __UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token __UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token __UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token __UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else 
pad_token # Mask token behave like a normal word, i.e. include the space before it __UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: __UpperCAmelCase : List[Any] = json.load(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()} __UpperCAmelCase : Dict = errors # how to handle errors in decoding __UpperCAmelCase : Optional[int] = bytes_to_unicode() __UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: __UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1] __UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges] __UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Tuple = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _lowerCamelCase ( self: Dict ) -> Any: return len(self.encoder ) def _lowerCamelCase ( self: Optional[Any] ) -> List[str]: return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCamelCase ( self: int , __lowerCamelCase: List[Any] ) -> Union[str, Any]: if token in self.cache: return self.cache[token] __UpperCAmelCase : List[Any] = tuple(__lowerCamelCase ) __UpperCAmelCase : Dict = get_pairs(__lowerCamelCase ) if not pairs: return token while True: __UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : str = 0 while i < len(__lowerCamelCase ): try: __UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase : Union[str, Any] = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase : List[Any] = tuple(__lowerCamelCase ) __UpperCAmelCase : str = new_word if len(__lowerCamelCase ) == 1: break else: __UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = word return word def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict: __UpperCAmelCase : Any = [] for token in re.findall(self.pat , __lowerCamelCase ): __UpperCAmelCase : int = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our 
case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict: return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]: return self.decoder.get(__lowerCamelCase ) def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int: __UpperCAmelCase : Dict = "".join(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __UpperCAmelCase : Dict = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) __UpperCAmelCase : Optional[Any] = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) __UpperCAmelCase : Optional[Any] = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]: __UpperCAmelCase : int = [self.sep_token_id] __UpperCAmelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]: __UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): __UpperCAmelCase : Optional[Any] = " " + text return (text, kwargs) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]: return token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]: __UpperCAmelCase : Tuple = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix 
as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: __UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
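# A usage sketch, assuming the class above is the upstream `BlenderbotTokenizer`;
# fetching "facebook/blenderbot-3B" requires network access.
from transformers import BlenderbotTokenizer

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
ids = tokenizer(" Hello, how are you?").input_ids  # build_inputs_with_special_tokens appends EOS
print(tokenizer.decode(ids))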
342
1
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _UpperCamelCase ( snake_case__, snake_case__=0.999, snake_case__="cosine", ) -> List[Any]: if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case__ ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case__ ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' ) __UpperCAmelCase : Union[str, Any] = [] for i in range(snake_case__ ): __UpperCAmelCase : Union[str, Any] = i / num_diffusion_timesteps __UpperCAmelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ), snake_case__ ) ) return torch.tensor(snake_case__, dtype=torch.floataa ) class _snake_case ( _lowercase , _lowercase ): lowerCamelCase__: Dict = [e.name for e in KarrasDiffusionSchedulers] lowerCamelCase__: Optional[int] = 2 @register_to_config def __init__( self: Optional[Any] , __lowerCamelCase: int = 10_00 , __lowerCamelCase: float = 0.0_00_85 , __lowerCamelCase: float = 0.0_12 , __lowerCamelCase: str = "linear" , __lowerCamelCase: Optional[Union[np.ndarray, List[float]]] = None , __lowerCamelCase: str = "epsilon" , __lowerCamelCase: str = "linspace" , __lowerCamelCase: int = 0 , ) -> int: if trained_betas is not None: __UpperCAmelCase : Any = torch.tensor(__lowerCamelCase , dtype=torch.floataa ) elif beta_schedule == "linear": __UpperCAmelCase : Optional[Any] = torch.linspace(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __UpperCAmelCase : Optional[Any] = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowerCamelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __UpperCAmelCase : Optional[int] = betas_for_alpha_bar(__lowerCamelCase ) else: raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' ) __UpperCAmelCase : Union[str, Any] = 1.0 - self.betas __UpperCAmelCase : Optional[int] = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any]=None ) -> int: if schedule_timesteps is None: __UpperCAmelCase : List[str] = self.timesteps __UpperCAmelCase : List[Any] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g.
for image-to-image) if len(self._index_counter ) == 0: __UpperCAmelCase : Union[str, Any] = 1 if len(__lowerCamelCase ) > 1 else 0 else: __UpperCAmelCase : Optional[Any] = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep __UpperCAmelCase : Dict = self._index_counter[timestep_int] return indices[pos].item() @property def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _lowerCamelCase ( self: Dict , __lowerCamelCase: torch.FloatTensor , __lowerCamelCase: Union[float, torch.FloatTensor] , ) -> torch.FloatTensor: __UpperCAmelCase : Optional[int] = self.index_for_timestep(__lowerCamelCase ) if self.state_in_first_order: __UpperCAmelCase : List[Any] = self.sigmas[step_index] else: __UpperCAmelCase : Optional[int] = self.sigmas_interpol[step_index] __UpperCAmelCase : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5) return sample def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: Union[str, torch.device] = None , __lowerCamelCase: Optional[int] = None , ) -> Optional[int]: __UpperCAmelCase : List[str] = num_inference_steps __UpperCAmelCase : Dict = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __UpperCAmelCase : Any = np.linspace(0 , num_train_timesteps - 1 , __lowerCamelCase , dtype=__lowerCamelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": __UpperCAmelCase : List[Any] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __UpperCAmelCase : List[str] = (np.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(__lowerCamelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __UpperCAmelCase : Union[str, Any] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __UpperCAmelCase : Dict = (np.arange(__lowerCamelCase , 0 , -step_ratio )).round().copy().astype(__lowerCamelCase ) timesteps -= 1 else: raise ValueError( f'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) __UpperCAmelCase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __UpperCAmelCase : Tuple = torch.from_numpy(np.log(__lowerCamelCase ) ).to(__lowerCamelCase ) __UpperCAmelCase : Any = np.interp(__lowerCamelCase , np.arange(0 , len(__lowerCamelCase ) ) , __lowerCamelCase ) __UpperCAmelCase : str = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __UpperCAmelCase : Optional[Any] = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase ) # interpolate sigmas __UpperCAmelCase : Any = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() __UpperCAmelCase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) __UpperCAmelCase : Optional[Any] = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(__lowerCamelCase ).startswith("mps" ): # mps does not support float64 __UpperCAmelCase : Dict = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase , dtype=torch.floataa ) else: __UpperCAmelCase : Union[str, Any] = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase ) # interpolate timesteps __UpperCAmelCase : List[str] = self.sigma_to_t(__lowerCamelCase ).to(__lowerCamelCase , dtype=timesteps.dtype ) __UpperCAmelCase : Tuple = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() __UpperCAmelCase : int = torch.cat([timesteps[:1], interleaved_timesteps] ) __UpperCAmelCase : List[str] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __UpperCAmelCase : List[Any] = defaultdict(__lowerCamelCase ) def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: List[Any] ) -> List[str]: # get log sigma __UpperCAmelCase : List[str] = sigma.log() # get distribution __UpperCAmelCase : Union[str, Any] = log_sigma - self.log_sigmas[:, None] # get sigmas range __UpperCAmelCase : Union[str, Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) __UpperCAmelCase : int = low_idx + 1 __UpperCAmelCase : str = self.log_sigmas[low_idx] __UpperCAmelCase : List[Any] = self.log_sigmas[high_idx] # interpolate sigmas __UpperCAmelCase : str = (low - log_sigma) / (low - high) __UpperCAmelCase : Optional[Any] = w.clamp(0 , 1 ) # transform interpolation to time range __UpperCAmelCase : Any = (1 - w) * low_idx + w * high_idx __UpperCAmelCase : str = t.view(sigma.shape ) return t @property def _lowerCamelCase ( self: str ) -> int: return self.sample is None def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Union[torch.FloatTensor, np.ndarray] , __lowerCamelCase: Union[float, torch.FloatTensor] , __lowerCamelCase: Union[torch.FloatTensor, np.ndarray] , __lowerCamelCase: bool = True , ) -> Union[SchedulerOutput, Tuple]: __UpperCAmelCase : int = self.index_for_timestep(__lowerCamelCase ) # advance index counter by 1 __UpperCAmelCase : Optional[Any] = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __UpperCAmelCase : List[Any] = self.sigmas[step_index] __UpperCAmelCase : Optional[Any] = self.sigmas_interpol[step_index + 1] __UpperCAmelCase : Optional[int] = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method __UpperCAmelCase : Tuple = self.sigmas[step_index - 1] __UpperCAmelCase : str = self.sigmas_interpol[step_index] __UpperCAmelCase : Union[str, Any] = self.sigmas[step_index] # 
currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __UpperCAmelCase : str = 0 __UpperCAmelCase : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __UpperCAmelCase : int = sigma_hat if self.state_in_first_order else sigma_interpol __UpperCAmelCase : List[str] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __UpperCAmelCase : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol __UpperCAmelCase : Any = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("prediction_type not implemented yet: sample" ) else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __UpperCAmelCase : Optional[int] = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __UpperCAmelCase : List[Any] = sigma_interpol - sigma_hat # store for 2nd order step __UpperCAmelCase : List[str] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order __UpperCAmelCase : List[Any] = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep __UpperCAmelCase : Optional[Any] = sigma_next - sigma_hat __UpperCAmelCase : Tuple = self.sample __UpperCAmelCase : Optional[Any] = None __UpperCAmelCase : List[str] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__lowerCamelCase ) def _lowerCamelCase ( self: Tuple , __lowerCamelCase: torch.FloatTensor , __lowerCamelCase: torch.FloatTensor , __lowerCamelCase: torch.FloatTensor , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples __UpperCAmelCase : int = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(__lowerCamelCase ): # mps does not support float64 __UpperCAmelCase : List[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa ) __UpperCAmelCase : Union[str, Any] = timesteps.to(original_samples.device , dtype=torch.floataa ) else: __UpperCAmelCase : Optional[Any] = self.timesteps.to(original_samples.device ) __UpperCAmelCase : Optional[Any] = timesteps.to(original_samples.device ) __UpperCAmelCase : List[Any] = [self.index_for_timestep(__lowerCamelCase , __lowerCamelCase ) for t in timesteps] __UpperCAmelCase : Tuple = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __UpperCAmelCase : Optional[Any] = sigma.unsqueeze(-1 ) __UpperCAmelCase : str = original_samples + noise * sigma return noisy_samples def __len__( self: str ) -> Tuple: return self.config.num_train_timesteps
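# --- Illustrative usage sketch (not part of the scheduler module above) ---
# A minimal, self-contained rendering of the cosine ("squaredcos_cap_v2")
# beta schedule computed by the helper at the top of this file; the function
# and argument names here are assumptions chosen for clarity.
import math

import torch


def cosine_betas(num_diffusion_timesteps: int, max_beta: float = 0.999) -> torch.Tensor:
    # alpha_bar(t) follows the squared-cosine schedule of Nichol & Dhariwal (2021)
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped near t = 1
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


# cosine_betas(1000) yields a monotonically increasing schedule bounded by max_beta.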
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _snake_case ( _lowercase , unittest.TestCase ): lowerCamelCase__: List[Any] = CanineTokenizer lowerCamelCase__: Optional[int] = False def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: super().setUp() __UpperCAmelCase : Tuple = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]: return CanineTokenizer.from_pretrained("google/canine-s" ) def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer: __UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) __UpperCAmelCase : Optional[int] = 10_24 return tokenizer @require_torch def _lowerCamelCase ( self: List[str] ) -> int: __UpperCAmelCase : Union[str, Any] = self.canine_tokenizer __UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."] # fmt: off __UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on __UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def _lowerCamelCase ( self: Optional[Any] ) -> Tuple: __UpperCAmelCase : Optional[Any] = self.canine_tokenizer __UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."] __UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("input_ids" , __lowerCamelCase ) self.assertIn("attention_mask" , __lowerCamelCase ) self.assertIn("token_type_ids" , __lowerCamelCase ) @require_torch def _lowerCamelCase ( self: Any ) -> List[str]: __UpperCAmelCase : Optional[Any] = self.canine_tokenizer __UpperCAmelCase : int = [ "What's the weater?", "It's about 25 degrees.", ] __UpperCAmelCase : List[Any] = tokenizer( text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) def _lowerCamelCase ( self: List[Any] ) -> Tuple: # safety check on max_len default value so we are sure the test works __UpperCAmelCase : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __UpperCAmelCase : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __UpperCAmelCase : int = tempfile.mkdtemp() __UpperCAmelCase : List[Any] = 
" He is very happy, UNwant\u00E9d,running" __UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) tokenizer.save_pretrained(__lowerCamelCase ) __UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase ) __UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) shutil.rmtree(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __UpperCAmelCase : List[Any] = tempfile.mkdtemp() __UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running" __UpperCAmelCase : str = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: __UpperCAmelCase : Tuple = chr(0xE_0_0_7 ) additional_special_tokens.append(__lowerCamelCase ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) __UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) tokenizer.save_pretrained(__lowerCamelCase ) __UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__lowerCamelCase ) def _lowerCamelCase ( self: str ) -> Optional[int]: __UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_clean_sequence(__lowerCamelCase ) # a special token for Canine can be defined as follows: __UpperCAmelCase : int = 0xE_0_0_5 __UpperCAmelCase : Tuple = chr(__lowerCamelCase ) tokenizer.add_special_tokens({"cls_token": special_token} ) __UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(len(__lowerCamelCase ) , 1 ) __UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) __UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) __UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(__lowerCamelCase , input_encoded + special_token_id ) __UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) self.assertTrue(special_token not in decoded ) def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]: __UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase : Optional[int] = chr(0xE_0_0_5 ) __UpperCAmelCase : List[str] = chr(0xE_0_0_6 
) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} ) __UpperCAmelCase : Tuple = tokenizer.tokenize(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase ) self.assertEqual(len(__lowerCamelCase ) , 1 ) self.assertEqual(len(__lowerCamelCase ) , 1 ) self.assertEqual(token_a[0] , __lowerCamelCase ) self.assertEqual(token_a[0] , __lowerCamelCase ) @require_tokenizers def _lowerCamelCase ( self: str ) -> Union[str, Any]: __UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: __UpperCAmelCase : Union[str, Any] = 0xE_0_0_6 __UpperCAmelCase : int = chr(__lowerCamelCase ) __UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase ) tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__lowerCamelCase ) tokenizer.from_pretrained(__lowerCamelCase ) def _lowerCamelCase ( self: Dict ) -> List[str]: __UpperCAmelCase : str = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__lowerCamelCase ) with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file: __UpperCAmelCase : Tuple = json.load(__lowerCamelCase ) with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file: __UpperCAmelCase : Optional[int] = json.load(__lowerCamelCase ) # a special token for Canine can be defined as follows: __UpperCAmelCase : Any = 0xE_0_0_6 __UpperCAmelCase : Union[str, Any] = chr(__lowerCamelCase ) __UpperCAmelCase : Dict = [new_token_a] __UpperCAmelCase : int = [new_token_a] with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(__lowerCamelCase , __lowerCamelCase ) with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(__lowerCamelCase , __lowerCamelCase ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 ) self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) __UpperCAmelCase : List[Any] = 0xE_0_0_7 __UpperCAmelCase : List[Any] = chr(__lowerCamelCase ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __UpperCAmelCase : str = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )] __UpperCAmelCase : Dict = tokenizer_class.from_pretrained( __lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 ) self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase : int = "hello world" if self.space_between_special_tokens: __UpperCAmelCase : Any = "[CLS] hello world [SEP]" else: __UpperCAmelCase : Union[str, Any] = input __UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) __UpperCAmelCase : Any = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__lowerCamelCase , [output, output.lower()] ) def _lowerCamelCase ( self: Dict ) -> Any: __UpperCAmelCase : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase : List[str] = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] __UpperCAmelCase : List[str] = "a" __UpperCAmelCase : Any = ord(__lowerCamelCase ) for attr in attributes_list: setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase ) self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase ) setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase ) self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase ) setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] ) self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] ) self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] ) __UpperCAmelCase : Tuple = 0xE_0_0_6 __UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase ) setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] ) self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] ) self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , 
[additional_special_token_id] ) def _lowerCamelCase ( self: str ) -> Union[str, Any]: pass def _lowerCamelCase ( self: Any ) -> Any: pass def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple: pass def _lowerCamelCase ( self: Optional[int] ) -> Any: pass def _lowerCamelCase ( self: List[str] ) -> str: pass def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]: pass def _lowerCamelCase ( self: Optional[Any] ) -> Tuple: pass def _lowerCamelCase ( self: str ) -> Tuple: pass
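# --- Illustrative usage sketch (not part of the tests above) ---
# CANINE tokenizes at the Unicode code-point level, which is what the batch
# test at the top of this file asserts; downloading "google/canine-s" from
# the Hub is assumed to work in this environment.
from transformers import CanineTokenizer

tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
batch = tokenizer(
    ["Life is like a box of chocolates.", "You never know what you're gonna get."],
    padding=True,
    return_tensors="pt",
)
# One id per character, plus [CLS]/[SEP]; padding brings both rows to equal length.
print(batch.input_ids.shape)  # e.g. torch.Size([2, 39])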
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self: List[Any] , __lowerCamelCase: Tuple , __lowerCamelCase: bool = True , __lowerCamelCase: Dict[str, int] = None , __lowerCamelCase: int = 32 , __lowerCamelCase: bool = True , __lowerCamelCase: Union[int, float] = 1 / 2_55 , __lowerCamelCase: bool = True , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[float, List[float]]] = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , __lowerCamelCase: Optional[Union[float, List[float]]] = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , __lowerCamelCase: bool = True , __lowerCamelCase: Dict=7 , __lowerCamelCase: Union[str, Any]=30 , __lowerCamelCase: Dict=4_00 , __lowerCamelCase: Tuple=3 , ) -> int: __UpperCAmelCase : Dict = parent __UpperCAmelCase : str = do_resize __UpperCAmelCase : Tuple = size if size is not None else {"shortest_edge": 2_88} __UpperCAmelCase : str = size_divisor __UpperCAmelCase : Optional[Any] = do_rescale __UpperCAmelCase : Dict = rescale_factor __UpperCAmelCase : Any = do_normalize __UpperCAmelCase : Dict = do_center_crop __UpperCAmelCase : List[Any] = image_mean __UpperCAmelCase : Optional[int] = image_std __UpperCAmelCase : Any = do_pad __UpperCAmelCase : int = batch_size __UpperCAmelCase : int = num_channels __UpperCAmelCase : Optional[Any] = min_resolution __UpperCAmelCase : Optional[int] = max_resolution def _lowerCamelCase ( self: Union[str, Any] ) -> int: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Dict , __lowerCamelCase: int=False ) -> Any: if not batched: __UpperCAmelCase : Optional[Any] = self.size["shortest_edge"] __UpperCAmelCase : List[str] = image_inputs[0] if isinstance(__lowerCamelCase , Image.Image ): __UpperCAmelCase , __UpperCAmelCase : int = image.size else: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = image.shape[1], image.shape[2] __UpperCAmelCase : Optional[Any] = size / min(__lowerCamelCase , __lowerCamelCase ) if h < w: __UpperCAmelCase , __UpperCAmelCase : Tuple = size, scale * w else: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = scale * h, size __UpperCAmelCase : Optional[int] = int((13_33 / 8_00) * size ) if max(__lowerCamelCase , __lowerCamelCase ) > max_size: __UpperCAmelCase : int = max_size / max(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Optional[Any] = newh * scale __UpperCAmelCase : str = neww * scale __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = int(newh + 0.5 ), int(neww + 0.5 ) __UpperCAmelCase , __UpperCAmelCase : str = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __UpperCAmelCase : List[Any] = [] for image in image_inputs: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __UpperCAmelCase : List[Any] = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0] 
__UpperCAmelCase : Optional[Any] = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _snake_case ( _lowercase , unittest.TestCase ): lowerCamelCase__: Dict = BridgeTowerImageProcessor if is_vision_available() else None def _lowerCamelCase ( self: Optional[Any] ) -> Dict: __UpperCAmelCase : List[Any] = BridgeTowerImageProcessingTester(self ) @property def _lowerCamelCase ( self: List[str] ) -> int: return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self: str ) -> str: __UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size_divisor" ) ) def _lowerCamelCase ( self: Any ) -> int: pass def _lowerCamelCase ( self: int ) -> Any: # Initialize image processor __UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input __UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase : str = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowerCamelCase ( self: Optional[int] ) -> List[str]: # Initialize image processor __UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input __UpperCAmelCase : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values __UpperCAmelCase , __UpperCAmelCase : str = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowerCamelCase ( self: Any ) -> Any: # Initialize 
image processor __UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input __UpperCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase : List[str] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
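# --- Illustrative usage sketch (not part of the tests above) ---
# Drives the processor under test directly; the constructor arguments mirror
# the defaults exercised by BridgeTowerImageProcessingTester, and the random
# image is a stand-in for real data.
import numpy as np

from transformers import BridgeTowerImageProcessor

processor = BridgeTowerImageProcessor(size={"shortest_edge": 288}, size_divisor=32)
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
pixel_values = processor(images=image, return_tensors="pt").pixel_values
# Height and width are rescaled so the shortest edge is ~288, then floored to
# multiples of size_divisor, matching get_expected_values above.
print(pixel_values.shape)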
import logging import os from .state import PartialState class _snake_case ( logging.LoggerAdapter ): @staticmethod def _lowerCamelCase ( __lowerCamelCase: Any ) -> int: __UpperCAmelCase : str = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: List[Any] ) -> Optional[int]: if PartialState._shared_state == {}: raise RuntimeError( "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." ) __UpperCAmelCase : Any = kwargs.pop("main_process_only" , __lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = kwargs.pop("in_order" , __lowerCamelCase ) if self.isEnabledFor(__lowerCamelCase ): if self._should_log(__lowerCamelCase ): __UpperCAmelCase , __UpperCAmelCase : int = self.process(__lowerCamelCase , __lowerCamelCase ) self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ) elif in_order: __UpperCAmelCase : Optional[int] = PartialState() for i in range(state.num_processes ): if i == state.process_index: __UpperCAmelCase , __UpperCAmelCase : List[Any] = self.process(__lowerCamelCase , __lowerCamelCase ) self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ) state.wait_for_everyone() def _UpperCamelCase ( snake_case__, snake_case__ = None ) -> List[str]: if log_level is None: __UpperCAmelCase : List[Any] = os.environ.get("ACCELERATE_LOG_LEVEL", snake_case__ ) __UpperCAmelCase : Union[str, Any] = logging.getLogger(snake_case__ ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(snake_case__, {} )
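# --- Illustrative usage sketch (not part of the module above) ---
# The adapter requires accelerate state to exist first, which is exactly the
# RuntimeError guarded against in the log method above; creating an
# Accelerator (or PartialState) satisfies that precondition.
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()
logger = get_logger(__name__, log_level="INFO")
logger.info("printed once, on the main process only")
logger.info("printed by every process, rank by rank", main_process_only=False, in_order=True)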
import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any: __UpperCAmelCase : Optional[Any] = UniSpeechSatForSequenceClassification.from_pretrained(snake_case__, config=snake_case__ ) __UpperCAmelCase : int = downstream_dict["projector.weight"] __UpperCAmelCase : Optional[Any] = downstream_dict["projector.bias"] __UpperCAmelCase : Tuple = downstream_dict["model.post_net.linear.weight"] __UpperCAmelCase : Optional[Any] = downstream_dict["model.post_net.linear.bias"] return model def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Optional[int]: __UpperCAmelCase : Union[str, Any] = UniSpeechSatForAudioFrameClassification.from_pretrained(snake_case__, config=snake_case__ ) __UpperCAmelCase : Dict = downstream_dict["model.linear.weight"] __UpperCAmelCase : Optional[Any] = downstream_dict["model.linear.bias"] return model def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> int: __UpperCAmelCase : Optional[int] = UniSpeechSatForXVector.from_pretrained(snake_case__, config=snake_case__ ) __UpperCAmelCase : str = downstream_dict["connector.weight"] __UpperCAmelCase : Any = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __UpperCAmelCase : Optional[Any] = downstream_dict[ f'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] __UpperCAmelCase : Tuple = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] __UpperCAmelCase : Optional[int] = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] __UpperCAmelCase : Optional[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] __UpperCAmelCase : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] __UpperCAmelCase : Optional[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] __UpperCAmelCase : int = downstream_dict["objective.W"] return model @torch.no_grad() def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__ ) -> Tuple: __UpperCAmelCase : Dict = torch.load(snake_case__, map_location="cpu" ) __UpperCAmelCase : List[Any] = checkpoint["Downstream"] __UpperCAmelCase : List[Any] = UniSpeechSatConfig.from_pretrained(snake_case__ ) __UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained( snake_case__, return_attention_mask=snake_case__, do_normalize=snake_case__ ) __UpperCAmelCase : Optional[int] = hf_config.architectures[0] if arch.endswith("ForSequenceClassification" ): __UpperCAmelCase : Any = convert_classification(snake_case__, snake_case__, snake_case__ ) elif arch.endswith("ForAudioFrameClassification" ): __UpperCAmelCase : Optional[int] = convert_diarization(snake_case__, snake_case__, snake_case__ ) elif arch.endswith("ForXVector" ): __UpperCAmelCase : Any = convert_xvector(snake_case__, snake_case__, snake_case__ ) else: raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: __UpperCAmelCase : Optional[int] = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(snake_case__ ) hf_model.save_pretrained(snake_case__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument( 
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''') _snake_case = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
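# --- Illustrative invocation (paths and names are placeholders) ---
# Assuming this script is saved as convert_s3prl_checkpoint.py, a run could
# look like the following; none of these checkpoint paths are guaranteed to
# exist:
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base-plus \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model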
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class _snake_case ( _lowercase ): def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str: super().__init__( __lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , ) __UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths} __UpperCAmelCase : int = Text( cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , ) def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]: # Build iterable dataset if self.streaming: __UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __UpperCAmelCase : Any = None __UpperCAmelCase : Any = None __UpperCAmelCase : Dict = None __UpperCAmelCase : str = None self.builder.download_and_prepare( download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , ) __UpperCAmelCase : Dict = self.builder.as_dataset( split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory ) return dataset
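# --- Illustrative usage sketch (not part of the module above) ---
# This reader backs the packaged "text" builder, so the usual entry point is
# `load_dataset`; "corpus.txt" is a placeholder path.
from datasets import load_dataset

dataset = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")
print(dataset[0])  # {"text": "<first line of corpus.txt>"}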
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''', } class _snake_case ( _lowercase ): lowerCamelCase__: List[str] = "open-llama" def __init__( self: Union[str, Any] , __lowerCamelCase: Optional[int]=10_00_00 , __lowerCamelCase: Optional[Any]=40_96 , __lowerCamelCase: int=1_10_08 , __lowerCamelCase: Any=32 , __lowerCamelCase: Union[str, Any]=32 , __lowerCamelCase: Dict="silu" , __lowerCamelCase: Optional[int]=20_48 , __lowerCamelCase: Dict=0.02 , __lowerCamelCase: Tuple=1e-6 , __lowerCamelCase: Any=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]=1 , __lowerCamelCase: int=2 , __lowerCamelCase: Optional[Any]=False , __lowerCamelCase: int=True , __lowerCamelCase: Union[str, Any]=0.1 , __lowerCamelCase: Tuple=0.1 , __lowerCamelCase: Tuple=True , __lowerCamelCase: Dict=True , __lowerCamelCase: Any=None , **__lowerCamelCase: Tuple , ) -> Dict: __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = hidden_size __UpperCAmelCase : int = intermediate_size __UpperCAmelCase : Dict = num_hidden_layers __UpperCAmelCase : Tuple = num_attention_heads __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : List[str] = initializer_range __UpperCAmelCase : int = rms_norm_eps __UpperCAmelCase : str = use_cache __UpperCAmelCase : int = kwargs.pop( "use_memorry_efficient_attention" , __lowerCamelCase ) __UpperCAmelCase : Optional[Any] = hidden_dropout_prob __UpperCAmelCase : Any = attention_dropout_prob __UpperCAmelCase : List[str] = use_stable_embedding __UpperCAmelCase : Tuple = shared_input_output_embedding __UpperCAmelCase : Dict = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase , ) def _lowerCamelCase ( self: Any ) -> Dict: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __lowerCamelCase ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f'''got {self.rope_scaling}''' ) __UpperCAmelCase : Optional[Any] = self.rope_scaling.get("type" , __lowerCamelCase ) __UpperCAmelCase : Optional[int] = self.rope_scaling.get("factor" , __lowerCamelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__lowerCamelCase , __lowerCamelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
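# --- Illustrative construction (not part of the module above) ---
# Shows the `rope_scaling` contract enforced by `_rope_scaling_validation`:
# a two-field dict whose `type` is "linear" or "dynamic" and whose `factor`
# is a float strictly greater than 1. The public class name is assumed to be
# OpenLlamaConfig.
from transformers import OpenLlamaConfig

config = OpenLlamaConfig(rope_scaling={"type": "dynamic", "factor": 2.0})  # accepted
# OpenLlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})   -> ValueError
# OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})  -> ValueError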
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _snake_case = { '''configuration_trajectory_transformer''': [ '''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrajectoryTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrajectoryTransformerModel''', '''TrajectoryTransformerPreTrainedModel''', '''load_tf_weights_in_trajectory_transformer''', ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
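# --- Illustrative sketch of the lazy-import pattern used above ---
# `_LazyModule` defers importing heavy submodules until an attribute is first
# accessed; a minimal standalone analogue of the same idea (class and
# attribute names here are illustrative, not the library's internals):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, item: str):
        submodule = self._symbol_to_submodule[item]
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        value = getattr(module, item)
        setattr(self, item, value)  # cache so __getattr__ fires only once per symbol
        return value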
from ....configuration_utils import PretrainedConfig from ....utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''speechbrain/m-ctc-t-large''': '''https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json''', # See all M-CTC-T models at https://huggingface.co/models?filter=mctct } class _snake_case ( _lowercase ): lowerCamelCase__: List[str] = "mctct" def __init__( self: Any , __lowerCamelCase: Any=80_65 , __lowerCamelCase: Dict=15_36 , __lowerCamelCase: Union[str, Any]=36 , __lowerCamelCase: str=61_44 , __lowerCamelCase: int=4 , __lowerCamelCase: Dict=3_84 , __lowerCamelCase: Tuple=9_20 , __lowerCamelCase: Union[str, Any]=1e-5 , __lowerCamelCase: Tuple=0.3 , __lowerCamelCase: Union[str, Any]="relu" , __lowerCamelCase: Any=0.02 , __lowerCamelCase: List[Any]=0.3 , __lowerCamelCase: str=0.3 , __lowerCamelCase: Optional[int]=1 , __lowerCamelCase: Optional[Any]=0 , __lowerCamelCase: Any=2 , __lowerCamelCase: List[str]=1 , __lowerCamelCase: Tuple=0.3 , __lowerCamelCase: str=1 , __lowerCamelCase: Dict=(7,) , __lowerCamelCase: Any=(3,) , __lowerCamelCase: Tuple=80 , __lowerCamelCase: str=1 , __lowerCamelCase: Optional[Any]=None , __lowerCamelCase: int="sum" , __lowerCamelCase: Any=False , **__lowerCamelCase: Optional[int] , ) -> Dict: super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase ) __UpperCAmelCase : Any = vocab_size __UpperCAmelCase : Optional[int] = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Any = intermediate_size __UpperCAmelCase : Optional[Any] = num_attention_heads __UpperCAmelCase : List[Any] = attention_head_dim __UpperCAmelCase : List[str] = max_position_embeddings __UpperCAmelCase : Dict = layer_norm_eps __UpperCAmelCase : Tuple = layerdrop __UpperCAmelCase : Union[str, Any] = hidden_act __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : str = hidden_dropout_prob __UpperCAmelCase : int = attention_probs_dropout_prob __UpperCAmelCase : Union[str, Any] = pad_token_id __UpperCAmelCase : Optional[int] = bos_token_id __UpperCAmelCase : Optional[int] = eos_token_id __UpperCAmelCase : Optional[Any] = conv_glu_dim __UpperCAmelCase : int = conv_dropout __UpperCAmelCase : Optional[Any] = num_conv_layers __UpperCAmelCase : int = input_feat_per_channel __UpperCAmelCase : List[str] = input_channels __UpperCAmelCase : Optional[Any] = conv_channels __UpperCAmelCase : Union[str, Any] = ctc_loss_reduction __UpperCAmelCase : str = ctc_zero_infinity # prevents config testing fail with exporting to json __UpperCAmelCase : Optional[Any] = list(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = list(__lowerCamelCase ) if len(self.conv_kernel ) != self.num_conv_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` " f'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, ''' f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
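# --- Illustrative construction (not part of the module above) ---
# The constructor requires len(conv_kernel) == num_conv_layers, as checked at
# the end of __init__; the public class name is assumed to be MCTCTConfig.
from transformers import MCTCTConfig

config = MCTCTConfig(num_conv_layers=1, conv_kernel=(7,))  # satisfies the invariant
# MCTCTConfig(num_conv_layers=2, conv_kernel=(7,))  -> ValueError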
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]: __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : Optional[int] = image_size __UpperCAmelCase : List[str] = num_channels __UpperCAmelCase : Union[str, Any] = num_stages __UpperCAmelCase : List[str] = hidden_sizes __UpperCAmelCase : Any = depths __UpperCAmelCase : Optional[int] = is_training __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[int] = intermediate_size __UpperCAmelCase : Optional[Any] = hidden_act __UpperCAmelCase : Union[str, Any] = num_labels __UpperCAmelCase : Any = initializer_range __UpperCAmelCase : List[str] = out_features __UpperCAmelCase : Tuple = out_indices __UpperCAmelCase : List[Any] = scope def _lowerCamelCase ( self: List[Any] ) -> Optional[int]: __UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : List[str] = None if self.use_labels: __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase : Optional[Any] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self: Tuple ) -> List[Any]: return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]: __UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : List[str] = model(__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], 
self.image_size // 32, self.image_size // 32) , ) def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple: __UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Any = model(__lowerCamelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _lowerCamelCase ( self: int ) -> List[str]: __UpperCAmelCase : int = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs __UpperCAmelCase : str = {"pixel_values": pixel_values} return config, inputs_dict def _lowerCamelCase ( self: List[Any] ) -> List[Any]: __UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class _snake_case ( _lowercase , _lowercase , unittest.TestCase ): lowerCamelCase__: Dict = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowerCamelCase__: str = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) lowerCamelCase__: Tuple = False lowerCamelCase__: int = False lowerCamelCase__: Dict = False lowerCamelCase__: int = False lowerCamelCase__: Any = False def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]: __UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self ) __UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def _lowerCamelCase ( self: Dict ) -> Tuple: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() 
self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self: List[Any] ) -> int: return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def _lowerCamelCase ( self: Any ) -> Any: pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def _lowerCamelCase ( self: str ) -> Optional[Any]: pass def _lowerCamelCase ( self: List[Any] ) -> int: if not self.model_tester.is_training: return for model_class in self.all_model_classes: __UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels() __UpperCAmelCase : Optional[Any] = True if model_class.__name__ in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ]: continue __UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() __UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) __UpperCAmelCase : Any = model(**__lowerCamelCase ).loss loss.backward() def _lowerCamelCase ( self: Optional[int] ) -> Dict: if not self.model_tester.is_training: return for model_class in self.all_model_classes: __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels() __UpperCAmelCase : List[str] = False __UpperCAmelCase : int = True if ( model_class.__name__ in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )] or not model_class.supports_gradient_checkpointing ): continue __UpperCAmelCase : int = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.gradient_checkpointing_enable() model.train() __UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) __UpperCAmelCase : Any = model(**__lowerCamelCase ).loss loss.backward() def _lowerCamelCase ( self: List[str] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : str = model_class(__lowerCamelCase ) __UpperCAmelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : List[Any] = [*signature.parameters.keys()] __UpperCAmelCase : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _lowerCamelCase ( self: str ) -> List[Any]: __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _lowerCamelCase ( self: Union[str, Any] ) -> Dict: def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ): __UpperCAmelCase : Any = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) __UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : Optional[int] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, 
self.model_tester.image_size // 4] , ) __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Optional[int] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Any = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def _lowerCamelCase ( self: Dict ) -> List[Any]: for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def _UpperCamelCase ( ) -> List[Any]: __UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self: Optional[int] ) -> Dict: return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def _lowerCamelCase ( self: List[Any] ) -> Tuple: __UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase ) __UpperCAmelCase : List[str] = self.default_image_processor __UpperCAmelCase : Optional[Any] = prepare_img() __UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): __UpperCAmelCase : str = model(**__lowerCamelCase ) # verify the logits __UpperCAmelCase : Dict = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) __UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
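# --- Illustrative end-to-end sketch (not part of the tests above) ---
# Mirrors the slow integration test: classify the COCO fixture image with the
# "facebook/convnextv2-tiny-1k-224" checkpoint; network access is assumed.
import torch
from PIL import Image

from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. a cat-related label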
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
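# ---------------------------------------------------------------------------
# Usage sketch: the formatter above is normally selected through
# `Dataset.with_format("jax")` rather than instantiated directly. A minimal,
# self-contained example (assumes the `datasets` and `jax` packages):
import jax
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})
ds = ds.with_format("jax")  # rows, columns and batches now come back as jax.Array
row = ds[0]
assert isinstance(row["x"], jax.Array)
batch = ds[:2]
print(batch["x"].shape, batch["x"].dtype)  # (2, 2) float32; ints are int32 unless x64 mode is enabled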
import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _snake_case = logging.get_logger(__name__) _snake_case = { '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''', # See all DETR models at https://huggingface.co/models?filter=detr } class _snake_case ( _lowercase ): lowerCamelCase__: str = "detr" lowerCamelCase__: Dict = ["past_key_values"] lowerCamelCase__: str = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) __UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): __UpperCAmelCase : List[Any] = backbone_config.get("model_type" ) __UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type] __UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase ) # set timm attributes to None __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None __UpperCAmelCase : Any = use_timm_backbone __UpperCAmelCase : Optional[Any] = backbone_config __UpperCAmelCase : Optional[Any] = num_channels __UpperCAmelCase : List[Any] = num_queries __UpperCAmelCase : Optional[int] = d_model __UpperCAmelCase : Optional[Any] = encoder_ffn_dim __UpperCAmelCase : Dict = encoder_layers __UpperCAmelCase : List[Any] = encoder_attention_heads __UpperCAmelCase : int = decoder_ffn_dim __UpperCAmelCase : Tuple = decoder_layers __UpperCAmelCase : int = decoder_attention_heads __UpperCAmelCase : List[Any] = dropout __UpperCAmelCase : Dict = attention_dropout __UpperCAmelCase : Optional[Any] = activation_dropout __UpperCAmelCase : int = activation_function __UpperCAmelCase : Any = init_std __UpperCAmelCase : str = init_xavier_std __UpperCAmelCase : int = encoder_layerdrop __UpperCAmelCase : Tuple = decoder_layerdrop __UpperCAmelCase : List[Any] = encoder_layers __UpperCAmelCase : Optional[Any] = auxiliary_loss __UpperCAmelCase : int = position_embedding_type __UpperCAmelCase : Optional[int] = backbone __UpperCAmelCase : str = use_pretrained_backbone __UpperCAmelCase : Dict = dilation # Hungarian matcher __UpperCAmelCase : Optional[int] = class_cost __UpperCAmelCase : Optional[Any] = bbox_cost __UpperCAmelCase : Optional[int] = giou_cost # Loss coefficients __UpperCAmelCase : Any = mask_loss_coefficient __UpperCAmelCase : Any = dice_loss_coefficient __UpperCAmelCase : Any = bbox_loss_coefficient __UpperCAmelCase : Optional[int] = giou_loss_coefficient __UpperCAmelCase : Optional[Any] = eos_coefficient super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase ) @property def _lowerCamelCase ( self: Dict ) -> int: return self.encoder_attention_heads @property def _lowerCamelCase ( self: str ) -> int: return self.d_model @classmethod def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]: return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self: str ) -> Dict[str, any]: __UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: __UpperCAmelCase : int = self.backbone_config.to_dict() __UpperCAmelCase : List[str] = self.__class__.model_type return output class _snake_case ( _lowercase ): lowerCamelCase__: Optional[int] = version.parse("1.11" ) @property def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _lowerCamelCase ( self: Optional[Any] ) -> float: return 1e-5 @property def _lowerCamelCase ( self: List[str] ) -> int: return 12
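# ---------------------------------------------------------------------------
# Usage sketch: instantiating the configuration above with its defaults and
# exercising the `attribute_map` aliases (standard transformers API):
from transformers import DetrConfig

config = DetrConfig()  # defaults to a timm ResNet-50 backbone, 100 queries, d_model=256
assert config.hidden_size == config.d_model  # alias declared in attribute_map
assert config.num_attention_heads == config.encoder_attention_heads
print(config.num_queries, config.d_model)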
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : def __init__( self: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: List[Any]=3 , __lowerCamelCase: Any=32 , __lowerCamelCase: Dict=3 , __lowerCamelCase: int=10 , __lowerCamelCase: Dict=[10, 20, 30, 40] , __lowerCamelCase: List[str]=[1, 1, 2, 1] , __lowerCamelCase: str=True , __lowerCamelCase: str=True , __lowerCamelCase: List[Any]="relu" , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=None , ) -> Optional[Any]: __UpperCAmelCase : Dict = parent __UpperCAmelCase : Any = batch_size __UpperCAmelCase : List[Any] = image_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : Any = embeddings_size __UpperCAmelCase : Tuple = hidden_sizes __UpperCAmelCase : Union[str, Any] = depths __UpperCAmelCase : Union[str, Any] = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : Dict = hidden_act __UpperCAmelCase : Tuple = num_labels __UpperCAmelCase : Dict = scope __UpperCAmelCase : Union[str, Any] = len(__lowerCamelCase ) def _lowerCamelCase ( self: Optional[Any] ) -> List[str]: __UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : List[Any] = None if self.use_labels: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase : List[Any] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self: List[str] ) -> Optional[int]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _lowerCamelCase ( self: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: str , __lowerCamelCase: List[Any] ) -> Optional[Any]: __UpperCAmelCase : Union[str, Any] = TFRegNetModel(config=__lowerCamelCase ) __UpperCAmelCase : Tuple = model(__lowerCamelCase , training=__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _lowerCamelCase ( self: int , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: int ) -> List[Any]: __UpperCAmelCase : Union[str, Any] = self.num_labels __UpperCAmelCase : str = TFRegNetForImageClassification(__lowerCamelCase ) __UpperCAmelCase : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self: Optional[int] ) -> Union[str, Any]: __UpperCAmelCase : int = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs __UpperCAmelCase : 
Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class _snake_case ( _lowercase , _lowercase , unittest.TestCase ): lowerCamelCase__: Optional[Any] = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCamelCase__: Any = ( {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCamelCase__: Optional[Any] = False lowerCamelCase__: Dict = False lowerCamelCase__: Union[str, Any] = False lowerCamelCase__: int = False lowerCamelCase__: Union[str, Any] = False def _lowerCamelCase ( self: int ) -> str: __UpperCAmelCase : List[str] = TFRegNetModelTester(self ) __UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def _lowerCamelCase ( self: Union[str, Any] ) -> int: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def _lowerCamelCase ( self: Any ) -> Tuple: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def _lowerCamelCase ( self: str ) -> List[str]: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def _lowerCamelCase ( self: int ) -> Union[str, Any]: pass def _lowerCamelCase ( self: List[Any] ) -> List[Any]: __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = model_class(__lowerCamelCase ) __UpperCAmelCase : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Any = [*signature.parameters.keys()] __UpperCAmelCase : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _lowerCamelCase ( self: str ) -> Optional[Any]: __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _lowerCamelCase ( self: Any ) -> Optional[int]: def check_hidden_states_output(__lowerCamelCase: List[str] , __lowerCamelCase: Tuple , __lowerCamelCase: Union[str, Any] ): __UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase ) __UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : int = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : str = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: __UpperCAmelCase : Union[str, Any] = layer_type __UpperCAmelCase : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : str = True check_hidden_states_output(__lowerCamelCase , 
__lowerCamelCase , __lowerCamelCase ) def _lowerCamelCase ( self: str ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__lowerCamelCase: Tuple , __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: str={} ): __UpperCAmelCase : Tuple = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ) __UpperCAmelCase : List[Any] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple() def recursive_check(__lowerCamelCase: Any , __lowerCamelCase: Dict ): if isinstance(__lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase ): recursive_check(__lowerCamelCase , __lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=( "Tuple and dict output are not equal. Difference:" f''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase ) for model_class in self.all_model_classes: __UpperCAmelCase : str = model_class(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : List[str] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : int = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) __UpperCAmelCase : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : List[str] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) __UpperCAmelCase : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) __UpperCAmelCase : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) def _lowerCamelCase ( self: Any ) -> Optional[Any]: __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def _lowerCamelCase ( self: int ) -> Any: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : int = TFRegNetModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def _UpperCamelCase ( ) -> Any: __UpperCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class _snake_case ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self: List[str] ) -> Union[str, Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _lowerCamelCase ( self: Optional[Any] ) -> List[str]: __UpperCAmelCase : Union[str, Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __UpperCAmelCase : str = self.default_image_processor __UpperCAmelCase : 
Optional[int] = prepare_img() __UpperCAmelCase : Dict = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass __UpperCAmelCase : Optional[int] = model(**__lowerCamelCase , training=__lowerCamelCase ) # verify the logits __UpperCAmelCase : List[str] = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) __UpperCAmelCase : str = tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
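# ---------------------------------------------------------------------------
# Usage sketch mirroring the TF integration test above. The checkpoint id is
# assumed to be the first entry of TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
# ("facebook/regnet-y-040" at the time of writing).
import tensorflow as tf
from PIL import Image

from transformers import AutoImageProcessor, TFRegNetForImageClassification

image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits
print(model.config.id2label[int(tf.argmax(logits, axis=-1)[0])])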
from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=1e-1_2 ) -> str: __UpperCAmelCase : Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T __UpperCAmelCase : int = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T return jnp.matmul(snake_case__, norm_emb_a.T ) class _snake_case ( nn.Module ): lowerCamelCase__: CLIPConfig lowerCamelCase__: jnp.dtype = jnp.floataa def _lowerCamelCase ( self: Any ) -> Tuple: __UpperCAmelCase : List[str] = FlaxCLIPVisionModule(self.config.vision_config ) __UpperCAmelCase : Any = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype ) __UpperCAmelCase : int = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) ) __UpperCAmelCase : int = self.param( "special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) ) __UpperCAmelCase : Tuple = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) ) __UpperCAmelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) ) def __call__( self: List[Any] , __lowerCamelCase: Dict ) -> Dict: __UpperCAmelCase : Optional[int] = self.vision_model(__lowerCamelCase )[1] __UpperCAmelCase : List[str] = self.visual_projection(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds ) __UpperCAmelCase : Optional[Any] = jax_cosine_distance(__lowerCamelCase , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs __UpperCAmelCase : List[str] = 0.0 __UpperCAmelCase : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment __UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 ) __UpperCAmelCase : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase ) # Use a lower threshold if an image has any special care concept __UpperCAmelCase : List[Any] = is_special_care * 0.01 __UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment __UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 ) __UpperCAmelCase : Any = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class _snake_case ( _lowercase ): lowerCamelCase__: int = CLIPConfig lowerCamelCase__: Tuple = "clip_input" lowerCamelCase__: str = FlaxStableDiffusionSafetyCheckerModule def __init__( self: Union[str, Any] , __lowerCamelCase: CLIPConfig , __lowerCamelCase: Optional[Tuple] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: jnp.dtype = jnp.floataa , __lowerCamelCase: bool = True , **__lowerCamelCase: Optional[int] , ) -> int: if input_shape is None: __UpperCAmelCase : Dict = (1, 2_24, 2_24, 3) __UpperCAmelCase : Tuple = self.module_class(config=__lowerCamelCase , dtype=__lowerCamelCase , **__lowerCamelCase ) super().__init__(__lowerCamelCase , __lowerCamelCase , input_shape=__lowerCamelCase , seed=__lowerCamelCase , dtype=__lowerCamelCase , _do_init=_do_init ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: jax.random.KeyArray , __lowerCamelCase: Tuple , __lowerCamelCase: FrozenDict = None ) -> FrozenDict: # 
init input tensor __UpperCAmelCase : Tuple = jax.random.normal(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Dict = jax.random.split(__lowerCamelCase ) __UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng} __UpperCAmelCase : str = self.module.init(__lowerCamelCase , __lowerCamelCase )["params"] return random_params def __call__( self: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: dict = None , ) -> List[Any]: __UpperCAmelCase : int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) ) return self.module.apply( {"params": params or self.params} , jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
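# ---------------------------------------------------------------------------
# Minimal sketch of the scoring rule used in __call__ above, on dummy data. The
# helper mirrors the module's cosine-distance function; the 768-dim projection
# size and the 0.5 threshold are illustrative stand-ins, not the trained values.
import jax.numpy as jnp
from jax import random


def cosine_similarity(emb_1, emb_2, eps=1e-12):
    # normalize each embedding, then take the dot product
    norm_1 = emb_1 / jnp.clip(jnp.linalg.norm(emb_1, axis=1, keepdims=True), a_min=eps)
    norm_2 = emb_2 / jnp.clip(jnp.linalg.norm(emb_2, axis=1, keepdims=True), a_min=eps)
    return norm_1 @ norm_2.T


key_a, key_b = random.split(random.PRNGKey(0))
image_embeds = random.normal(key_a, (1, 768))
concept_embeds = random.normal(key_b, (17, 768))
concept_thresholds = jnp.full((17,), 0.5)  # stand-in for concept_embeds_weights

concept_scores = cosine_similarity(image_embeds, concept_embeds) - concept_thresholds[None, :]
has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)  # one boolean per image
print(has_nsfw_concepts)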
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n (Project Euler problem 10), via a sieve."""
    primality_list = [0 for _ in range(n + 1)]  # 0 = prime, 1 = composite
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):  # mark every multiple of i as composite
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
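# Quick sanity checks for the sieve above: the primes below 10 are 2, 3, 5 and 7
# (sum 17), and the sum of the primes below 100 is 1060. The known answer for the
# default limit of two million is 142913828922.
assert solution(10) == 17
assert solution(100) == 1060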
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def _UpperCamelCase ( snake_case__ ) -> Tuple: __UpperCAmelCase : Union[str, Any] = 384 if "tiny" in model_name: __UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3] __UpperCAmelCase : List[Any] = [96, 192, 384, 768] if "small" in model_name: __UpperCAmelCase : Tuple = [3, 3, 27, 3] __UpperCAmelCase : Any = [96, 192, 384, 768] if "base" in model_name: __UpperCAmelCase : str = [3, 3, 27, 3] __UpperCAmelCase : str = [128, 256, 512, 1024] __UpperCAmelCase : str = 512 if "large" in model_name: __UpperCAmelCase : Dict = [3, 3, 27, 3] __UpperCAmelCase : int = [192, 384, 768, 1536] __UpperCAmelCase : Dict = 768 if "xlarge" in model_name: __UpperCAmelCase : List[Any] = [3, 3, 27, 3] __UpperCAmelCase : Tuple = [256, 512, 1024, 2048] __UpperCAmelCase : int = 1024 # set label information __UpperCAmelCase : List[Any] = 150 __UpperCAmelCase : str = "huggingface/label-files" __UpperCAmelCase : List[Any] = "ade20k-id2label.json" __UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) ) __UpperCAmelCase : str = {int(snake_case__ ): v for k, v in idalabel.items()} __UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} __UpperCAmelCase : int = ConvNextConfig( depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] ) __UpperCAmelCase : int = UperNetConfig( backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, ) return config def _UpperCamelCase ( snake_case__ ) -> Tuple: __UpperCAmelCase : Optional[int] = [] # fmt: off # stem rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") ) rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") ) rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") ) rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', 
f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any: __UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ ) __UpperCAmelCase : Optional[int] = val def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]: __UpperCAmelCase : Dict = { "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth", "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth", "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth", "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth", "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth", } __UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name] __UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"] __UpperCAmelCase : Dict = get_upernet_config(snake_case__ ) __UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): __UpperCAmelCase : str = state_dict.pop(snake_case__ ) if "bn" in key: __UpperCAmelCase : int = key.replace("bn", "batch_norm" ) __UpperCAmelCase : Union[str, Any] = val # rename keys __UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__, snake_case__, snake_case__ ) model.load_state_dict(snake_case__ ) # verify on image __UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" __UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" ) 
__UpperCAmelCase : str = SegformerImageProcessor() __UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values with torch.no_grad(): __UpperCAmelCase : Union[str, Any] = model(snake_case__ ) if model_name == "upernet-convnext-tiny": __UpperCAmelCase : Any = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": __UpperCAmelCase : Optional[Any] = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": __UpperCAmelCase : Dict = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": __UpperCAmelCase : Tuple = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": __UpperCAmelCase : Union[str, Any] = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print("Logits:", outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case__ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(snake_case__ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-convnext-tiny''', type=str, choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']], help='''Name of the ConvNext UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _snake_case = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
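# ---------------------------------------------------------------------------
# Once converted and pushed, the checkpoints can be consumed directly. A short
# inference sketch against the published "openmmlab/upernet-convnext-tiny" repo
# (any ADE20K-style RGB image will do for `image`):
import torch
from PIL import Image

from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

image = Image.open("ADE_val_00000001.jpg")
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch, num_labels, height, width)
print(logits.argmax(dim=1)[0].shape)  # per-pixel class ids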
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape the first page of Amazon.in search results for `product` into a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": (
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36"
            " (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36"
        ),
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        # append the row; indexing by the current length keeps labels 0..n-1
        data_frame.loc[len(data_frame)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # reset the price fields so a failed parse on the next item stands out
        product_price = " "
        product_mrp = " "
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
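# Note: this scrapes live HTML, so Amazon's markup (and rate limiting) can break
# it at any time. A typical invocation, writing the table to CSV:
#
#   df = get_amazon_product_data("laptop")
#   print(df.head())
#   df.to_csv("Amazon Product Data for laptop.csv")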
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''', } class _snake_case ( _lowercase ): lowerCamelCase__: Dict = "roc_bert" def __init__( self: int , __lowerCamelCase: Union[str, Any]=3_05_22 , __lowerCamelCase: int=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: int=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Any=5_12 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: str=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]="absolute" , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=9_10 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=2_48_58 , __lowerCamelCase: Optional[int]=True , **__lowerCamelCase: Any , ) -> List[Any]: __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Dict = max_position_embeddings __UpperCAmelCase : Optional[Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[str] = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : Optional[int] = attention_probs_dropout_prob __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : Optional[Any] = type_vocab_size __UpperCAmelCase : List[Any] = layer_norm_eps __UpperCAmelCase : Optional[int] = use_cache __UpperCAmelCase : Optional[Any] = enable_pronunciation __UpperCAmelCase : Any = enable_shape __UpperCAmelCase : Union[str, Any] = pronunciation_embed_dim __UpperCAmelCase : Optional[Any] = pronunciation_vocab_size __UpperCAmelCase : Optional[Any] = shape_embed_dim __UpperCAmelCase : List[Any] = shape_vocab_size __UpperCAmelCase : int = concat_input __UpperCAmelCase : int = position_embedding_type __UpperCAmelCase : Optional[int] = classifier_dropout super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
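# ---------------------------------------------------------------------------
# Usage sketch: building a randomly initialised model from the configuration
# above (standard transformers API). Note the extra pronunciation and shape
# vocabularies that distinguish RoCBert from vanilla BERT:
from transformers import RoCBertConfig, RoCBertModel

config = RoCBertConfig()
model = RoCBertModel(config)
print(config.vocab_size, config.pronunciation_vocab_size, config.shape_vocab_size)
# 30522 910 24858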
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the impedance triangle |Z|^2 = R^2 + X^2: exactly one of the three
    arguments must be 0, and that missing quantity is computed from the other two.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
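# Worked example: a series circuit with R = 3 ohm and X = 4 ohm has impedance
# |Z| = sqrt(3**2 + 4**2) = 5 ohm, and each quantity is recoverable from the
# other two:
assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
assert electrical_impedance(0, 4, 5) == {"resistance": 3.0}
assert electrical_impedance(3, 0, 5) == {"reactance": 4.0}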
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def _UpperCamelCase ( snake_case__ ) -> int: __UpperCAmelCase : int = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: __UpperCAmelCase : int = [144, 192, 240] __UpperCAmelCase : Optional[Any] = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: __UpperCAmelCase : Optional[Any] = [96, 120, 144] __UpperCAmelCase : Tuple = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: __UpperCAmelCase : str = [64, 80, 96] __UpperCAmelCase : Optional[Any] = [16, 16, 24, 48, 64, 80, 320] __UpperCAmelCase : Tuple = 0.05 __UpperCAmelCase : Dict = 2.0 if mobilevit_name.startswith("deeplabv3_" ): __UpperCAmelCase : str = 512 __UpperCAmelCase : Any = 16 __UpperCAmelCase : str = 21 __UpperCAmelCase : Union[str, Any] = "pascal-voc-id2label.json" else: __UpperCAmelCase : Optional[Any] = 1000 __UpperCAmelCase : int = "imagenet-1k-id2label.json" __UpperCAmelCase : Dict = "huggingface/label-files" __UpperCAmelCase : int = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) ) __UpperCAmelCase : Any = {int(snake_case__ ): v for k, v in idalabel.items()} __UpperCAmelCase : int = idalabel __UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()} return config def _UpperCamelCase ( snake_case__, snake_case__=False ) -> Tuple: for i in range(1, 6 ): if f'''layer_{i}.''' in name: __UpperCAmelCase : Tuple = name.replace(f'''layer_{i}.''', f'''encoder.layer.{i - 1}.''' ) if "conv_1." in name: __UpperCAmelCase : Dict = name.replace("conv_1.", "conv_stem." ) if ".block." in name: __UpperCAmelCase : Optional[int] = name.replace(".block.", "." ) if "exp_1x1" in name: __UpperCAmelCase : Tuple = name.replace("exp_1x1", "expand_1x1" ) if "red_1x1" in name: __UpperCAmelCase : Optional[Any] = name.replace("red_1x1", "reduce_1x1" ) if ".local_rep.conv_3x3." in name: __UpperCAmelCase : Optional[int] = name.replace(".local_rep.conv_3x3.", ".conv_kxk." ) if ".local_rep.conv_1x1." in name: __UpperCAmelCase : Any = name.replace(".local_rep.conv_1x1.", ".conv_1x1." ) if ".norm." in name: __UpperCAmelCase : Dict = name.replace(".norm.", ".normalization." ) if ".conv." in name: __UpperCAmelCase : List[Any] = name.replace(".conv.", ".convolution." ) if ".conv_proj." in name: __UpperCAmelCase : List[str] = name.replace(".conv_proj.", ".conv_projection." 
) for i in range(0, 2 ): for j in range(0, 4 ): if f'''.{i}.{j}.''' in name: __UpperCAmelCase : List[Any] = name.replace(f'''.{i}.{j}.''', f'''.{i}.layer.{j}.''' ) for i in range(2, 6 ): for j in range(0, 4 ): if f'''.{i}.{j}.''' in name: __UpperCAmelCase : Any = name.replace(f'''.{i}.{j}.''', f'''.{i}.''' ) if "expand_1x1" in name: __UpperCAmelCase : Optional[int] = name.replace("expand_1x1", "downsampling_layer.expand_1x1" ) if "conv_3x3" in name: __UpperCAmelCase : List[Any] = name.replace("conv_3x3", "downsampling_layer.conv_3x3" ) if "reduce_1x1" in name: __UpperCAmelCase : Dict = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1" ) for i in range(2, 5 ): if f'''.global_rep.{i}.weight''' in name: __UpperCAmelCase : Any = name.replace(f'''.global_rep.{i}.weight''', ".layernorm.weight" ) if f'''.global_rep.{i}.bias''' in name: __UpperCAmelCase : Optional[Any] = name.replace(f'''.global_rep.{i}.bias''', ".layernorm.bias" ) if ".global_rep." in name: __UpperCAmelCase : Tuple = name.replace(".global_rep.", ".transformer." ) if ".pre_norm_mha.0." in name: __UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_mha.0.", ".layernorm_before." ) if ".pre_norm_mha.1.out_proj." in name: __UpperCAmelCase : Tuple = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense." ) if ".pre_norm_ffn.0." in name: __UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_ffn.0.", ".layernorm_after." ) if ".pre_norm_ffn.1." in name: __UpperCAmelCase : Dict = name.replace(".pre_norm_ffn.1.", ".intermediate.dense." ) if ".pre_norm_ffn.4." in name: __UpperCAmelCase : int = name.replace(".pre_norm_ffn.4.", ".output.dense." ) if ".transformer." in name: __UpperCAmelCase : Tuple = name.replace(".transformer.", ".transformer.layer." ) if ".aspp_layer." in name: __UpperCAmelCase : Any = name.replace(".aspp_layer.", "." ) if ".aspp_pool." in name: __UpperCAmelCase : Optional[Any] = name.replace(".aspp_pool.", "." ) if "seg_head." in name: __UpperCAmelCase : Optional[int] = name.replace("seg_head.", "segmentation_head." ) if "segmentation_head.classifier.classifier." in name: __UpperCAmelCase : str = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier." ) if "classifier.fc." in name: __UpperCAmelCase : Optional[Any] = name.replace("classifier.fc.", "classifier." ) elif (not base_model) and ("segmentation_head." not in name): __UpperCAmelCase : List[str] = "mobilevit." + name return name def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=False ) -> Union[str, Any]: if base_model: __UpperCAmelCase : Optional[int] = "" else: __UpperCAmelCase : Tuple = "mobilevit." for key in orig_state_dict.copy().keys(): __UpperCAmelCase : Optional[int] = orig_state_dict.pop(snake_case__ ) if key[:8] == "encoder.": __UpperCAmelCase : str = key[8:] if "qkv" in key: __UpperCAmelCase : Tuple = key.split("." 
) __UpperCAmelCase : List[Any] = int(key_split[0][6:] ) - 1 __UpperCAmelCase : Optional[Any] = int(key_split[3] ) __UpperCAmelCase : Tuple = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' ) __UpperCAmelCase : List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size __UpperCAmelCase : Optional[Any] = ( f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.''' ) if "weight" in key: __UpperCAmelCase : Any = val[:dim, :] __UpperCAmelCase : Any = val[dim : dim * 2, :] __UpperCAmelCase : List[Any] = val[-dim:, :] else: __UpperCAmelCase : List[str] = val[:dim] __UpperCAmelCase : Optional[Any] = val[dim : dim * 2] __UpperCAmelCase : List[Any] = val[-dim:] else: __UpperCAmelCase : str = val return orig_state_dict def _UpperCamelCase ( ) -> Any: __UpperCAmelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" __UpperCAmelCase : List[str] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ) return im @torch.no_grad() def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=False ) -> Optional[Any]: __UpperCAmelCase : Tuple = get_mobilevit_config(snake_case__ ) # load original state_dict __UpperCAmelCase : str = torch.load(snake_case__, map_location="cpu" ) # load 🤗 model if mobilevit_name.startswith("deeplabv3_" ): __UpperCAmelCase : Optional[int] = MobileViTForSemanticSegmentation(snake_case__ ).eval() else: __UpperCAmelCase : List[Any] = MobileViTForImageClassification(snake_case__ ).eval() __UpperCAmelCase : Dict = convert_state_dict(snake_case__, snake_case__ ) model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor __UpperCAmelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 ) __UpperCAmelCase : Any = image_processor(images=prepare_img(), return_tensors="pt" ) __UpperCAmelCase : Dict = model(**snake_case__ ) __UpperCAmelCase : Tuple = outputs.logits if mobilevit_name.startswith("deeplabv3_" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": __UpperCAmelCase : int = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": __UpperCAmelCase : Tuple = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": __UpperCAmelCase : Any = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ] ) else: raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3, :3, :3], snake_case__, atol=1e-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": __UpperCAmelCase : str = torch.tensor([-0.9866, 0.2392, -1.1241] ) elif mobilevit_name == "mobilevit_xs": __UpperCAmelCase : Tuple = torch.tensor([-2.4761, 
-0.9399, -1.9587] ) elif mobilevit_name == "mobilevit_xxs": __UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ) else: raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3], snake_case__, atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case__ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case__ ) if push_to_hub: __UpperCAmelCase : List[str] = { "mobilevit_s": "mobilevit-small", "mobilevit_xs": "mobilevit-x-small", "mobilevit_xxs": "mobilevit-xx-small", "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small", "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small", "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small", } print("Pushing to the hub..." ) __UpperCAmelCase : int = model_mapping[mobilevit_name] image_processor.push_to_hub(snake_case__, organization="apple" ) model.push_to_hub(snake_case__, organization="apple" ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--mobilevit_name''', default='''mobilevit_s''', type=str, help=( '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',''' ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.''' ), ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _snake_case = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
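# ---------------------------------------------------------------------------
# After conversion the checkpoints live under the "apple" organization on the
# Hub. A short classification sketch with the published "apple/mobilevit-small"
# repo (any RGB image will do for `image`):
import torch
from PIL import Image

from transformers import MobileViTForImageClassification, MobileViTImageProcessor

image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-small")

image = Image.open("cats.jpg")
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])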
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
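# The renaming rule above rewrites indexed PyTorch module paths into Flax-style
# flat names, and rename_key_and_reshape_tensor then maps "weight" -> "kernel"
# (transposing linear weights and permuting 4-D conv kernels to HWIO), e.g.:
assert rename_key("down_blocks.0.attentions.1.proj.weight") == "down_blocks_0.attentions_1.proj.weight"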
350
import math


_snake_case = 10
_snake_case = 7
_snake_case = BALLS_PER_COLOUR * NUM_COLOURS


def _UpperCamelCase ( snake_case__ = 20 ) -> str:
    __UpperCAmelCase : Optional[Any] = math.comb(snake_case__, snake_case__ )
    __UpperCAmelCase : List[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR, snake_case__ )
    __UpperCAmelCase : Dict = NUM_COLOURS * (1 - missing_colour / total)
    return f'''{result:.9f}'''


if __name__ == "__main__":
    print(solution(20))
342
0
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


_snake_case = logging.get_logger(__name__)

_snake_case = '''▁'''

_snake_case = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}

_snake_case = {
    '''vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
    },
    '''monolingual_vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
    },
}

_snake_case = {'''vinai/bartpho-syllable''': 1024}


class _snake_case ( A__ ):
    lowerCamelCase__: Union[str, Any] = VOCAB_FILES_NAMES
    lowerCamelCase__: Dict = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase__: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase__: List[str] = ["input_ids", "attention_mask"]

    def __init__( self: List[Any] , __lowerCamelCase: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: str="<s>" , __lowerCamelCase: Optional[Any]="</s>" , __lowerCamelCase: int="</s>" , __lowerCamelCase: str="<s>" , __lowerCamelCase: Any="<unk>" , __lowerCamelCase: List[Any]="<pad>" , __lowerCamelCase: str="<mask>" , __lowerCamelCase: Dict = None , **__lowerCamelCase: Optional[int] , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        __UpperCAmelCase : Dict = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token

        __UpperCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )

        __UpperCAmelCase : Any = vocab_file
        __UpperCAmelCase : Optional[int] = monolingual_vocab_file
        __UpperCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__A ) )

        # Load the reduced vocab

        # Keep order of special tokens for backward compatibility
        __UpperCAmelCase : Tuple = {}
        __UpperCAmelCase : List[str] = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(__A ) not in self.fairseq_tokens_to_ids:
                __UpperCAmelCase : List[str] = cnt
                cnt += 1
        with open(__A , "r" , encoding="utf-8" ) as f:
            for line in f.readlines():
                __UpperCAmelCase : int = line.strip().split()[0]
                __UpperCAmelCase : int = len(self.fairseq_tokens_to_ids )
        if str(__A ) not in self.fairseq_tokens_to_ids:
            __UpperCAmelCase : Union[str, Any] = len(self.fairseq_tokens_to_ids )

        __UpperCAmelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self: Optional[int] ) -> Any:
        __UpperCAmelCase : Optional[Any] = self.__dict__.copy()
        __UpperCAmelCase : str = None
        __UpperCAmelCase : str = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self: List[str] , __lowerCamelCase: str ) -> Any:
        __UpperCAmelCase : Optional[int] = d

        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            __UpperCAmelCase : int = {}

        __UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    def _lowerCamelCase ( self: Any , __lowerCamelCase: List[str] , __lowerCamelCase: Any = None ) -> List[int]:
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __UpperCAmelCase : Tuple = [self.cls_token_id]
        __UpperCAmelCase : Any = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple = None , __lowerCamelCase: int = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )

        if token_ids_a is None:
            return [1] + ([0] * len(__A )) + [1]
        return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]

    def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] = None ) -> List[int]:
        __UpperCAmelCase : Optional[int] = [self.sep_token_id]
        __UpperCAmelCase : List[Any] = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def _lowerCamelCase ( self: str ) -> List[Any]:
        return len(self.fairseq_ids_to_tokens )

    def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
        __UpperCAmelCase : str = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _lowerCamelCase ( self: List[str] , __lowerCamelCase: Optional[Any] ) -> List[str]:
        return self.sp_model.encode(__A , out_type=__A )

    def _lowerCamelCase ( self: str , __lowerCamelCase: int ) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _lowerCamelCase ( self: Dict , __lowerCamelCase: int ) -> List[str]:
        return self.fairseq_ids_to_tokens[index]

    def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: List[str] ) -> List[Any]:
        __UpperCAmelCase : int = """""".join(__A ).replace(__A , " " ).strip()
        return out_string

    def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Tuple = None ) -> Tuple[str]:
        if not os.path.isdir(__A ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __UpperCAmelCase : List[Any] = os.path.join(
            __A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        __UpperCAmelCase : Tuple = os.path.join(
            __A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"] , )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __A )
        elif not os.path.isfile(self.vocab_file ):
            with open(__A , "wb" ) as fi:
                __UpperCAmelCase : int = self.sp_model.serialized_model_proto()
                fi.write(__A )

        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            __A ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , __A )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(__A , "w" , encoding="utf-8" ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(__A )} \n''' )

        return out_vocab_file, out_monolingual_vocab_file
351
def _UpperCamelCase ( snake_case__ ) -> int:
    __UpperCAmelCase : int = [0] * len(snake_case__ )
    __UpperCAmelCase : Union[str, Any] = []
    __UpperCAmelCase : str = [1] * len(snake_case__ )

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(snake_case__ ) ):
        if indegree[i] == 0:
            queue.append(snake_case__ )

    while queue:
        __UpperCAmelCase : List[str] = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                __UpperCAmelCase : str = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(snake_case__ )

    print(max(snake_case__ ) )


# Adjacency list of Graph
_snake_case = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
342
0
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


_snake_case = logging.get_logger(__name__)


class _snake_case ( _UpperCAmelCase ):
    def __init__( self: Optional[Any] , *__lowerCamelCase: List[Any] , **__lowerCamelCase: List[Any] ) -> Dict:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead." , _UpperCAmelCase , )
        super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
352
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_snake_case = {
    '''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
    '''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
    '''processing_whisper''': ['''WhisperProcessor'''],
    '''tokenization_whisper''': ['''WhisperTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = ['''WhisperTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = [
        '''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''WhisperForConditionalGeneration''',
        '''WhisperModel''',
        '''WhisperPreTrainedModel''',
        '''WhisperForAudioClassification''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = [
        '''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFWhisperForConditionalGeneration''',
        '''TFWhisperModel''',
        '''TFWhisperPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = [
        '''FlaxWhisperForConditionalGeneration''',
        '''FlaxWhisperModel''',
        '''FlaxWhisperPreTrainedModel''',
        '''FlaxWhisperForAudioClassification''',
    ]

if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
342
0
from __future__ import annotations

from random import choice


def _UpperCamelCase ( snake_case__ ) -> str:
    return choice(__lowerCAmelCase )


def _UpperCamelCase ( snake_case__, snake_case__ ) -> int:
    __UpperCAmelCase : Tuple = random_pivot(__lowerCAmelCase )

    # partition based on pivot
    # linear time
    __UpperCAmelCase : Optional[Any] = [e for e in lst if e < pivot]
    __UpperCAmelCase : str = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(__lowerCAmelCase ) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(__lowerCAmelCase ) < k - 1:
        return kth_number(__lowerCAmelCase, k - len(__lowerCAmelCase ) - 1 )
    # pivot is in elements smaller than k
    else:
        return kth_number(__lowerCAmelCase, __lowerCAmelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
353
from __future__ import annotations

from math import pi


def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> dict[str, float]:
    if (inductance, frequency, reactance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if inductance < 0:
        raise ValueError("Inductance cannot be negative" )
    if frequency < 0:
        raise ValueError("Frequency cannot be negative" )
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative" )
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
342
0
class _snake_case :
    def __init__( self: Tuple , __lowerCamelCase: list[int] ) -> Dict:
        __UpperCAmelCase : Dict = len(_lowerCamelCase )
        __UpperCAmelCase : Any = [0] * len_array

        if len_array > 0:
            __UpperCAmelCase : int = array[0]

        for i in range(1 , _lowerCamelCase ):
            __UpperCAmelCase : Union[str, Any] = self.prefix_sum[i - 1] + array[i]

    def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> Tuple:
        if start == 0:
            return self.prefix_sum[end]

        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: int ) -> List[str]:
        __UpperCAmelCase : List[Any] = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(_lowerCamelCase )

        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
354
import flax.linen as nn
import jax
import jax.numpy as jnp


class _snake_case ( nn.Module ):
    lowerCamelCase__: int
    lowerCamelCase__: jnp.dtype = jnp.floataa

    def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
        __UpperCAmelCase : List[str] = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]:
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape
        __UpperCAmelCase : Dict = jax.image.resize(
            __lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
        __UpperCAmelCase : Dict = self.conv(__lowerCamelCase )
        return hidden_states


class _snake_case ( nn.Module ):
    lowerCamelCase__: int
    lowerCamelCase__: jnp.dtype = jnp.floataa

    def _lowerCamelCase ( self: str ) -> Any:
        __UpperCAmelCase : Optional[int] = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]:
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        __UpperCAmelCase : Any = self.conv(__lowerCamelCase )
        return hidden_states


class _snake_case ( nn.Module ):
    lowerCamelCase__: int
    lowerCamelCase__: int = None
    lowerCamelCase__: float = 0.0
    lowerCamelCase__: bool = None
    lowerCamelCase__: jnp.dtype = jnp.floataa

    def _lowerCamelCase ( self: str ) -> List[str]:
        __UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels

        __UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        __UpperCAmelCase : List[str] = nn.Conv(
            __lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        __UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype )

        __UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        __UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob )
        __UpperCAmelCase : Tuple = nn.Conv(
            __lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        __UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        __UpperCAmelCase : List[Any] = None
        if use_nin_shortcut:
            __UpperCAmelCase : Dict = nn.Conv(
                __lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )

    def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]:
        __UpperCAmelCase : Dict = hidden_states

        __UpperCAmelCase : int = self.norma(__lowerCamelCase )
        __UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase )
        __UpperCAmelCase : Tuple = self.conva(__lowerCamelCase )

        __UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) )
        __UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 )
        __UpperCAmelCase : List[str] = hidden_states + temb

        __UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase )
        __UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase )
        __UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase )
        __UpperCAmelCase : List[str] = self.conva(__lowerCamelCase )

        if self.conv_shortcut is not None:
            __UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase )

        return hidden_states + residual
342
0
_snake_case = 8.3_1_4_4_6_2  # Unit - J mol-1 K-1


def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> List[str]:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value." )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value." )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
355
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss _snake_case = pytest.mark.integration @require_faiss class _snake_case ( _lowercase ): def _lowerCamelCase ( self: Union[str, Any] ) -> str: __UpperCAmelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(__lowerCamelCase ) for x in np.arange(30 ).tolist()]} ) return dset def _lowerCamelCase ( self: Optional[Any] ) -> Tuple: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() __UpperCAmelCase : int = dset.map( lambda __lowerCamelCase , __lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase ) __UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT ) __UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) dset.drop_index("vecs" ) def _lowerCamelCase ( self: List[str] ) -> int: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def _lowerCamelCase ( self: Optional[int] ) -> Dict: import faiss __UpperCAmelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file: dset.save_faiss_index("vecs" , tmp_file.name ) dset.load_faiss_index("vecs2" , tmp_file.name ) os.unlink(tmp_file.name ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def _lowerCamelCase ( self: List[Any] ) -> List[Any]: __UpperCAmelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" ) dset.drop_index("vecs" ) self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) ) def _lowerCamelCase ( self: List[str] ) -> Dict: from elasticsearch import Elasticsearch __UpperCAmelCase : Dataset = self._create_dummy_dataset() with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: __UpperCAmelCase : int = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30 ) __UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} __UpperCAmelCase : Any = Elasticsearch() dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) @require_faiss class _snake_case ( _lowercase ): def _lowerCamelCase ( self: List[str] ) -> Optional[int]: import faiss __UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa ) __UpperCAmelCase : List[str] = 1 __UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase ) self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1] __UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase ) self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] ) __UpperCAmelCase : Dict = [scores[0] for scores in total_scores] __UpperCAmelCase : int = [indices[0] for indices in total_indices] self.assertGreater(np.min(__lowerCamelCase ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase ) def _lowerCamelCase ( self: Any ) -> List[str]: import faiss __UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(__lowerCamelCase ): __UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) ) def 
_lowerCamelCase ( self: List[str] ) -> Dict: import faiss __UpperCAmelCase : str = faiss.IndexFlat(5 ) __UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def _lowerCamelCase ( self: Union[str, Any] ) -> int: import faiss __UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file: index.save(tmp_file.name ) __UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa ) __UpperCAmelCase : Tuple = 1 __UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def _UpperCamelCase ( snake_case__ ) -> Optional[Any]: import faiss __UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5, dtype=np.floataa ) ) __UpperCAmelCase : Optional[Any] = "index.faiss" __UpperCAmelCase : Optional[int] = f'''mock://{index_name}''' index.save(snake_case__, storage_options=mockfs.storage_options ) __UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options ) __UpperCAmelCase : str = np.zeros(5, dtype=np.floataa ) __UpperCAmelCase : Any = 1 __UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _snake_case ( _lowercase ): def _lowerCamelCase ( self: str ) -> Union[str, Any]: from elasticsearch import Elasticsearch with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: __UpperCAmelCase : Optional[Any] = Elasticsearch() __UpperCAmelCase : Dict = {"acknowledged": True} __UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(["foo", "bar", "foobar"] ) # single query __UpperCAmelCase : Dict = "foo" __UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} __UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __UpperCAmelCase : int = "foo" __UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __UpperCAmelCase : int = ["foo", "bar", "foobar"] __UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} __UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase ) __UpperCAmelCase : Tuple = [scores[0] for scores in total_scores] __UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices] 
self.assertGreater(np.min(__lowerCamelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __lowerCamelCase ) # batched queries with timeout __UpperCAmelCase : str = ["foo", "bar", "foobar"] __UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 ) __UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores] __UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(__lowerCamelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __lowerCamelCase )
342
0
"""simple docstring""" import unittest from knapsack import greedy_knapsack as kp class _snake_case ( unittest.TestCase ): def _lowerCamelCase ( self: str ) -> Dict: __UpperCAmelCase : Tuple = [10, 20, 30, 40, 50, 60] __UpperCAmelCase : str = [2, 4, 6, 8, 10, 12] __UpperCAmelCase : List[str] = 1_00 self.assertEqual(kp.calc_profit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , 2_10 ) def _lowerCamelCase ( self: str ) -> Dict: self.assertRaisesRegex(__lowerCamelCase , "max_weight must greater than zero." ) def _lowerCamelCase ( self: List[Any] ) -> List[str]: self.assertRaisesRegex(__lowerCamelCase , "Weight can not be negative." ) def _lowerCamelCase ( self: Any ) -> List[Any]: self.assertRaisesRegex(__lowerCamelCase , "Profit can not be negative." ) def _lowerCamelCase ( self: List[str] ) -> Dict: self.assertRaisesRegex(__lowerCamelCase , "max_weight must greater than zero." ) def _lowerCamelCase ( self: Tuple ) -> Tuple: self.assertRaisesRegex( __lowerCamelCase , "The length of profit and weight must be same." ) if __name__ == "__main__": unittest.main()
356
import argparse import struct import unittest class _snake_case : def __init__( self: Tuple , __lowerCamelCase: bytes ) -> None: __UpperCAmelCase : Tuple = data # Initialize hash values __UpperCAmelCase : Any = [ 0x6_A_0_9_E_6_6_7, 0xB_B_6_7_A_E_8_5, 0x3_C_6_E_F_3_7_2, 0xA_5_4_F_F_5_3_A, 0x5_1_0_E_5_2_7_F, 0x9_B_0_5_6_8_8_C, 0x1_F_8_3_D_9_A_B, 0x5_B_E_0_C_D_1_9, ] # Initialize round constants __UpperCAmelCase : Dict = [ 0x4_2_8_A_2_F_9_8, 0x7_1_3_7_4_4_9_1, 0xB_5_C_0_F_B_C_F, 0xE_9_B_5_D_B_A_5, 0x3_9_5_6_C_2_5_B, 0x5_9_F_1_1_1_F_1, 0x9_2_3_F_8_2_A_4, 0xA_B_1_C_5_E_D_5, 0xD_8_0_7_A_A_9_8, 0x1_2_8_3_5_B_0_1, 0x2_4_3_1_8_5_B_E, 0x5_5_0_C_7_D_C_3, 0x7_2_B_E_5_D_7_4, 0x8_0_D_E_B_1_F_E, 0x9_B_D_C_0_6_A_7, 0xC_1_9_B_F_1_7_4, 0xE_4_9_B_6_9_C_1, 0xE_F_B_E_4_7_8_6, 0x0_F_C_1_9_D_C_6, 0x2_4_0_C_A_1_C_C, 0x2_D_E_9_2_C_6_F, 0x4_A_7_4_8_4_A_A, 0x5_C_B_0_A_9_D_C, 0x7_6_F_9_8_8_D_A, 0x9_8_3_E_5_1_5_2, 0xA_8_3_1_C_6_6_D, 0xB_0_0_3_2_7_C_8, 0xB_F_5_9_7_F_C_7, 0xC_6_E_0_0_B_F_3, 0xD_5_A_7_9_1_4_7, 0x0_6_C_A_6_3_5_1, 0x1_4_2_9_2_9_6_7, 0x2_7_B_7_0_A_8_5, 0x2_E_1_B_2_1_3_8, 0x4_D_2_C_6_D_F_C, 0x5_3_3_8_0_D_1_3, 0x6_5_0_A_7_3_5_4, 0x7_6_6_A_0_A_B_B, 0x8_1_C_2_C_9_2_E, 0x9_2_7_2_2_C_8_5, 0xA_2_B_F_E_8_A_1, 0xA_8_1_A_6_6_4_B, 0xC_2_4_B_8_B_7_0, 0xC_7_6_C_5_1_A_3, 0xD_1_9_2_E_8_1_9, 0xD_6_9_9_0_6_2_4, 0xF_4_0_E_3_5_8_5, 0x1_0_6_A_A_0_7_0, 0x1_9_A_4_C_1_1_6, 0x1_E_3_7_6_C_0_8, 0x2_7_4_8_7_7_4_C, 0x3_4_B_0_B_C_B_5, 0x3_9_1_C_0_C_B_3, 0x4_E_D_8_A_A_4_A, 0x5_B_9_C_C_A_4_F, 0x6_8_2_E_6_F_F_3, 0x7_4_8_F_8_2_E_E, 0x7_8_A_5_6_3_6_F, 0x8_4_C_8_7_8_1_4, 0x8_C_C_7_0_2_0_8, 0x9_0_B_E_F_F_F_A, 0xA_4_5_0_6_C_E_B, 0xB_E_F_9_A_3_F_7, 0xC_6_7_1_7_8_F_2, ] __UpperCAmelCase : List[Any] = self.preprocessing(self.data ) self.final_hash() @staticmethod def _lowerCamelCase ( __lowerCamelCase: bytes ) -> bytes: __UpperCAmelCase : List[str] = B"\x80" + (B"\x00" * (63 - (len(__lowerCamelCase ) + 8) % 64)) __UpperCAmelCase : int = struct.pack(">Q" , (len(__lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def _lowerCamelCase ( self: Dict ) -> None: # Convert into blocks of 64 bytes __UpperCAmelCase : Dict = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __UpperCAmelCase : List[str] = list(struct.unpack(">16L" , __lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array __UpperCAmelCase : Union[str, Any] = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) __UpperCAmelCase : str = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) __UpperCAmelCase : Union[str, Any] = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_0_0_0_0_0_0_0_0 # Compression __UpperCAmelCase : Union[str, Any] = self.ror(__lowerCamelCase , 6 ) ^ self.ror(__lowerCamelCase , 11 ) ^ self.ror(__lowerCamelCase , 25 ) __UpperCAmelCase : Tuple = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g) __UpperCAmelCase : int = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_0_0_0_0_0_0_0_0 __UpperCAmelCase : List[Any] = self.ror(__lowerCamelCase , 2 ) ^ self.ror(__lowerCamelCase , 13 ) ^ self.ror(__lowerCamelCase , 22 ) __UpperCAmelCase : Dict = (a & b) ^ (a & c) 
^ (b & c) __UpperCAmelCase : int = (sa + maj) % 0x1_0_0_0_0_0_0_0_0 __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = ( g, f, e, ((d + tempa) % 0x1_0_0_0_0_0_0_0_0), c, b, a, ((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0), ) __UpperCAmelCase : Optional[int] = [a, b, c, d, e, f, g, h] # Modify final values __UpperCAmelCase : List[str] = [ ((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0) for index, element in enumerate(self.hashes ) ] __UpperCAmelCase : int = "".join([hex(__lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> int: return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations) class _snake_case ( unittest.TestCase ): def _lowerCamelCase ( self: List[Any] ) -> None: import hashlib __UpperCAmelCase : Dict = bytes("Test String" , "utf-8" ) self.assertEqual(SHAaaa(__lowerCamelCase ).hash , hashlib.shaaaa(__lowerCamelCase ).hexdigest() ) def _UpperCamelCase ( ) -> None: import doctest doctest.testmod() __UpperCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument( "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", ) parser.add_argument( "-f", "--file", dest="input_file", help="Hash contents of a file" ) __UpperCAmelCase : List[Any] = parser.parse_args() __UpperCAmelCase : Optional[int] = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file, "rb" ) as f: __UpperCAmelCase : List[str] = f.read() else: __UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" ) print(SHAaaa(snake_case__ ).hash ) if __name__ == "__main__": main()
342
0
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class _snake_case : def __init__( self: List[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int]=2 , __lowerCamelCase: str=8 , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Dict=True , __lowerCamelCase: int=True , __lowerCamelCase: Any=99 , __lowerCamelCase: Union[str, Any]=16 , __lowerCamelCase: Any=5 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: List[Any]=36 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: List[Any]=0.0 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: Optional[Any]=16 , __lowerCamelCase: int=2 , __lowerCamelCase: List[Any]=0.02 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Any=4 , __lowerCamelCase: Optional[Any]=None , ) -> Tuple: __UpperCAmelCase : Tuple = parent __UpperCAmelCase : Any = batch_size __UpperCAmelCase : Union[str, Any] = seq_length __UpperCAmelCase : List[Any] = is_training __UpperCAmelCase : int = use_input_mask __UpperCAmelCase : str = use_token_type_ids __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : Dict = vocab_size __UpperCAmelCase : str = hidden_size __UpperCAmelCase : str = num_hidden_layers __UpperCAmelCase : int = num_attention_heads __UpperCAmelCase : Tuple = intermediate_size __UpperCAmelCase : str = hidden_act __UpperCAmelCase : int = hidden_dropout_prob __UpperCAmelCase : Any = attention_probs_dropout_prob __UpperCAmelCase : int = max_position_embeddings __UpperCAmelCase : Union[str, Any] = type_vocab_size __UpperCAmelCase : int = type_sequence_label_size __UpperCAmelCase : Optional[Any] = initializer_range __UpperCAmelCase : Dict = num_labels __UpperCAmelCase : Tuple = num_choices __UpperCAmelCase : Any = scope def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Dict = None if self.use_input_mask: __UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Any = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : List[Any] = None if self.use_labels: __UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCamelCase ( self: List[str] ) -> Any: return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , ) def _lowerCamelCase ( self: Optional[int] ) -> List[str]: __UpperCAmelCase : int = self.get_config() __UpperCAmelCase : Tuple = 3_00 return config def _lowerCamelCase ( self: Dict ) -> Dict: ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Optional[int] = self.prepare_config_and_inputs() __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: List[str] , __lowerCamelCase: Tuple , __lowerCamelCase: int ) -> Union[str, Any]: __UpperCAmelCase : List[Any] = MraModel(config=_snake_case ) model.to(_snake_case ) model.eval() __UpperCAmelCase : Optional[Any] = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) __UpperCAmelCase : int = model(_snake_case , token_type_ids=_snake_case ) __UpperCAmelCase : Union[str, Any] = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Tuple , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: Dict , ) -> Any: __UpperCAmelCase : int = True __UpperCAmelCase : Tuple = MraModel(_snake_case ) model.to(_snake_case ) model.eval() __UpperCAmelCase : List[str] = model( _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , ) __UpperCAmelCase : Union[str, Any] = model( _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , encoder_hidden_states=_snake_case , ) __UpperCAmelCase : int = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: str , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: int ) -> int: __UpperCAmelCase : Union[str, Any] = MraForMaskedLM(config=_snake_case ) model.to(_snake_case ) model.eval() __UpperCAmelCase : Optional[Any] = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: Tuple , 
__lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: int , __lowerCamelCase: Any ) -> str: __UpperCAmelCase : int = MraForQuestionAnswering(config=_snake_case ) model.to(_snake_case ) model.eval() __UpperCAmelCase : str = model( _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: int , __lowerCamelCase: List[Any] , __lowerCamelCase: int ) -> List[str]: __UpperCAmelCase : List[str] = self.num_labels __UpperCAmelCase : List[str] = MraForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() __UpperCAmelCase : Tuple = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: Dict , __lowerCamelCase: int , __lowerCamelCase: List[Any] , __lowerCamelCase: int , __lowerCamelCase: List[Any] , __lowerCamelCase: Any , __lowerCamelCase: int ) -> int: __UpperCAmelCase : Optional[int] = self.num_labels __UpperCAmelCase : List[Any] = MraForTokenClassification(config=_snake_case ) model.to(_snake_case ) model.eval() __UpperCAmelCase : Tuple = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowerCamelCase ( self: str , __lowerCamelCase: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Any , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple: __UpperCAmelCase : int = self.num_choices __UpperCAmelCase : List[Any] = MraForMultipleChoice(config=_snake_case ) model.to(_snake_case ) model.eval() __UpperCAmelCase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Dict = model( _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowerCamelCase ( self: List[str] ) -> int: __UpperCAmelCase : int = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Union[str, Any] = config_and_inputs __UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _snake_case ( _lowercase , unittest.TestCase ): lowerCamelCase__: List[Any] = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCamelCase__: int = False lowerCamelCase__: str = False lowerCamelCase__: Optional[Any] = False lowerCamelCase__: Dict = False 
lowerCamelCase__: Any = () def _lowerCamelCase ( self: List[Any] ) -> str: __UpperCAmelCase : Optional[Any] = MraModelTester(self ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def _lowerCamelCase ( self: Optional[int] ) -> int: self.config_tester.run_common_tests() def _lowerCamelCase ( self: Optional[int] ) -> str: __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : Optional[int] = type self.model_tester.create_and_check_model(*_snake_case ) def _lowerCamelCase ( self: Union[str, Any] ) -> Any: __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_snake_case ) def _lowerCamelCase ( self: Tuple ) -> Dict: __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_snake_case ) def _lowerCamelCase ( self: Optional[int] ) -> List[Any]: __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_snake_case ) def _lowerCamelCase ( self: Dict ) -> Optional[Any]: __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_snake_case ) def _lowerCamelCase ( self: Optional[Any] ) -> List[str]: __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case ) @slow def _lowerCamelCase ( self: List[Any] ) -> List[str]: for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : List[str] = MraModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @unittest.skip(reason="MRA does not output attentions" ) def _lowerCamelCase ( self: int ) -> int: return @require_torch class _snake_case ( unittest.TestCase ): @slow def _lowerCamelCase ( self: List[str] ) -> Optional[Any]: __UpperCAmelCase : Optional[int] = MraModel.from_pretrained("uw-madison/mra-base-512-4" ) __UpperCAmelCase : str = torch.arange(2_56 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : Optional[int] = model(_snake_case )[0] __UpperCAmelCase : Optional[Any] = torch.Size((1, 2_56, 7_68) ) self.assertEqual(output.shape , _snake_case ) __UpperCAmelCase : int = torch.tensor( [[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _snake_case , atol=1e-4 ) ) @slow def _lowerCamelCase ( self: Union[str, Any] ) -> str: __UpperCAmelCase : List[str] = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" ) __UpperCAmelCase : Any = torch.arange(2_56 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : List[str] = model(_snake_case )[0] __UpperCAmelCase : Optional[int] = 5_02_65 __UpperCAmelCase : str = torch.Size((1, 2_56, vocab_size) ) self.assertEqual(output.shape , _snake_case ) __UpperCAmelCase : str = torch.tensor( [[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _snake_case , atol=1e-4 ) ) @slow def _lowerCamelCase ( self: List[str] ) -> Optional[int]: __UpperCAmelCase : Optional[int] = 
MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" ) __UpperCAmelCase : List[str] = torch.arange(40_96 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : List[str] = model(_snake_case )[0] __UpperCAmelCase : Dict = 5_02_65 __UpperCAmelCase : str = torch.Size((1, 40_96, vocab_size) ) self.assertEqual(output.shape , _snake_case ) __UpperCAmelCase : Dict = torch.tensor( [[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _snake_case , atol=1e-4 ) )
357
import numpy as np

import datasets


_snake_case = '''
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''

_snake_case = '''\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
'''

_snake_case = '''
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.

Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {\'mahalanobis\': array([0.5])}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
                }
            ) , )

    def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]:
        # convert to numpy arrays
        __UpperCAmelCase : int = np.array(__lowerCamelCase )
        __UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase )

        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError("Expected `X` to be a 2D vector" )
        if len(reference_distribution.shape ) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector" )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )

        # Get mahalanobis distance for each prediction
        __UpperCAmelCase : str = X - np.mean(__lowerCamelCase )
        __UpperCAmelCase : Union[str, Any] = np.cov(reference_distribution.T )
        try:
            __UpperCAmelCase : int = np.linalg.inv(__lowerCamelCase )
        except np.linalg.LinAlgError:
            __UpperCAmelCase : Optional[int] = np.linalg.pinv(__lowerCamelCase )
        __UpperCAmelCase : Optional[Any] = np.dot(__lowerCamelCase , __lowerCamelCase )
        __UpperCAmelCase : Optional[int] = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()

        return {"mahalanobis": mahal_dist}
342
0
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=5 ) -> Dict:
    assert masked_input.count("<mask>" ) == 1
    __UpperCAmelCase : List[str] = torch.tensor(tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ ) ).unsqueeze(0 )  # Batch size 1
    __UpperCAmelCase : Union[str, Any] = model(lowerCamelCase_ )[0]  # The last hidden-state is the first element of the output tuple
    __UpperCAmelCase : Any = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    __UpperCAmelCase : Union[str, Any] = logits[0, masked_index, :]
    __UpperCAmelCase : List[Any] = logits.softmax(dim=0 )
    __UpperCAmelCase : List[Any] = prob.topk(k=lowerCamelCase_, dim=0 )
    __UpperCAmelCase : Dict = """ """.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(lowerCamelCase_ ) )] )
    __UpperCAmelCase : Union[str, Any] = tokenizer.mask_token
    __UpperCAmelCase : List[Any] = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
        __UpperCAmelCase : Tuple = predicted_token_bpe.replace("\u2581", " " )
        if " {0}".format(lowerCamelCase_ ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(lowerCamelCase_ ), lowerCamelCase_ ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(lowerCamelCase_, lowerCamelCase_ ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs


_snake_case = CamembertTokenizer.from_pretrained('''camembert-base''')
_snake_case = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()

_snake_case = 'Le camembert est <mask> :)'
print(fill_mask(masked_input, model, tokenizer, topk=3))
358
import unittest

import numpy as np

from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.distilbert.modeling_flax_distilbert import (
        FlaxDistilBertForMaskedLM,
        FlaxDistilBertForMultipleChoice,
        FlaxDistilBertForQuestionAnswering,
        FlaxDistilBertForSequenceClassification,
        FlaxDistilBertForTokenClassification,
        FlaxDistilBertModel,
    )


class _snake_case ( unittest.TestCase ):
    def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str:
        __UpperCAmelCase : Union[str, Any] = parent
        __UpperCAmelCase : Optional[int] = batch_size
        __UpperCAmelCase : Optional[Any] = seq_length
        __UpperCAmelCase : Tuple = is_training
        __UpperCAmelCase : List[str] = use_attention_mask
        __UpperCAmelCase : Dict = use_token_type_ids
        __UpperCAmelCase : Optional[int] = use_labels
        __UpperCAmelCase : Optional[Any] = vocab_size
        __UpperCAmelCase : Union[str, Any] = hidden_size
        __UpperCAmelCase : Dict = num_hidden_layers
        __UpperCAmelCase : Dict = num_attention_heads
        __UpperCAmelCase : Tuple = intermediate_size
        __UpperCAmelCase : Union[str, Any] = hidden_act
        __UpperCAmelCase : Tuple = hidden_dropout_prob
        __UpperCAmelCase : str = attention_probs_dropout_prob
        __UpperCAmelCase : Optional[Any] = max_position_embeddings
        __UpperCAmelCase : Optional[int] = type_vocab_size
        __UpperCAmelCase : str = type_sequence_label_size
        __UpperCAmelCase : Tuple = initializer_range
        __UpperCAmelCase : str = num_choices

    def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
        __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __UpperCAmelCase : str = None
        if self.use_attention_mask:
            __UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )

        __UpperCAmelCase : Any = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , )

        return config, input_ids, attention_mask

    def _lowerCamelCase ( self: str ) -> Any:
        __UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
        __UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class _snake_case ( _lowercase , unittest.TestCase ):
    lowerCamelCase__: str = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _lowerCamelCase ( self: List[Any] ) -> Dict:
        __UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self )

    @slow
    def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
        for model_class_name in self.all_model_classes:
            __UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" )
            __UpperCAmelCase : Dict = model(np.ones((1, 1) ) )
            self.assertIsNotNone(__lowerCamelCase )


@require_flax
class _snake_case ( unittest.TestCase ):
    @slow
    def _lowerCamelCase ( self: int ) -> List[Any]:
        __UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
        __UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        __UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )

        __UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
        __UpperCAmelCase : str = (1, 11, 7_68)
        self.assertEqual(output.shape , __lowerCamelCase )
        __UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
342
0