"""simple docstring""" import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer __A = logging.getLogger(__name__) def _lowerCamelCase() -> int: _lowerCAmelCase =argparse.ArgumentParser( description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" ) parser.add_argument( """--dataset_name""" , type=_A , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , ) parser.add_argument( """--dataset_config""" , type=_A , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" ) parser.add_argument( """--tokenizer_name_or_path""" , type=_A , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , ) parser.add_argument( """--shard_size""" , type=_A , default=1000 , help="""Number of entries to go in a single shard.""" , ) parser.add_argument("""--split""" , type=_A , default="""train""" , choices=["""train""", """test""", """validation"""] ) parser.add_argument( """--limit""" , default=_A , type=_A , help="""Limit the number of shards (used for debugging).""" , ) parser.add_argument( """--max_length""" , type=_A , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum""" """ sequence length that is a multiple of 8.""" , ) parser.add_argument( """--output_dir""" , default="""tf-tpu""" , type=_A , help="""Output directory where the TFRecord shards will be saved. If the""" """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord""" """ shards will be directly saved to a Google Cloud Storage bucket.""" , ) _lowerCAmelCase =parser.parse_args() return args def _lowerCamelCase(__UpperCamelCase ) -> Optional[int]: def fn(__UpperCamelCase ): return tokenizer(examples["""text"""] ) return fn def _lowerCamelCase(__UpperCamelCase ) -> Dict: _lowerCAmelCase =[] for i in range(len(tokenized_data["""input_ids"""] ) ): _lowerCAmelCase ={ """input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ), """attention_mask""": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ), } _lowerCAmelCase =tf.train.Features(feature=_A ) _lowerCAmelCase =tf.train.Example(features=_A ) _lowerCAmelCase =example.SerializeToString() records.append(_A ) return records def _lowerCamelCase(__UpperCamelCase ) -> Dict: _lowerCAmelCase =datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: _lowerCAmelCase =min(len(_A ) , args.limit ) _lowerCAmelCase =dataset.select(range(_A ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) _lowerCAmelCase =AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) _lowerCAmelCase =os.path.join(args.output_dir , args.split ) if not os.path.exists(_A ): os.makedirs(_A ) else: _lowerCAmelCase =os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
_lowerCAmelCase =tokenize_function(_A ) _lowerCAmelCase =dataset.map(_A , batched=_A , num_proc=4 , remove_columns=["""text"""] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(__UpperCamelCase ): # Concatenate all texts. _lowerCAmelCase ={k: sum(examples[k] , [] ) for k in examples.keys()} _lowerCAmelCase =len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 _lowerCAmelCase =(total_length // args.max_length) * args.max_length # Split by chunks of max_len. _lowerCAmelCase ={ k: [t[i : i + args.max_length] for i in range(0 , _A , args.max_length )] for k, t in concatenated_examples.items() } return result _lowerCAmelCase =dataset_tokenized.map(_A , batched=_A , batch_size=1000 , num_proc=4 ) _lowerCAmelCase =0 _lowerCAmelCase =0 for shard in range(0 , len(_A ) , args.shard_size ): _lowerCAmelCase =grouped_dataset[shard : shard + args.shard_size] _lowerCAmelCase =len(dataset_snapshot["""input_ids"""] ) _lowerCAmelCase =os.path.join(_A , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) _lowerCAmelCase =get_serialized_examples(_A ) with tf.io.TFRecordWriter(_A ) as out_file: for i in range(len(_A ) ): _lowerCAmelCase =serialized_examples[i] out_file.write(_A ) print("""Wrote file {} containing {} records""".format(_A , _A ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , """w""" ) as f: print(F'''Total {args.split} records: {total_records}''' , file=_A ) if __name__ == "__main__": __A = parse_args() main(args)
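# For quick experiments it can be handy to bypass the CLI and call main() directly.
# A minimal, illustrative sketch: the SimpleNamespace stand-in for the parsed
# argparse arguments (and the small --limit value) are our own choices.
#
#   from types import SimpleNamespace
#
#   args = SimpleNamespace(
#       dataset_name="wikitext",
#       dataset_config="wikitext-103-raw-v1",
#       tokenizer_name_or_path="sayakpaul/unigram-tokenizer-wikitext",
#       shard_size=1000,
#       split="train",
#       limit=100,  # keep a debugging run small
#       max_length=512,
#       output_dir="tf-tpu",
#   )
#   main(args)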
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple: if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" ) _lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" ) _lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" ) _lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" ) _lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item _lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" ) _lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" ) _lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]: assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _lowerCAmelCase =old_checkpoint[path] _lowerCAmelCase =old_tensor.shape[0] // 3 _lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1) _lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3 _lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 ) _lowerCAmelCase =query.reshape(__UpperCamelCase ) _lowerCAmelCase =key.reshape(__UpperCamelCase ) _lowerCAmelCase =value.reshape(__UpperCamelCase ) for path in paths: _lowerCAmelCase =path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0] else: _lowerCAmelCase =old_checkpoint[path["""old"""]] def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase ={} _lowerCAmelCase =checkpoint["""time_embed.0.weight"""] _lowerCAmelCase =checkpoint["""time_embed.0.bias"""] _lowerCAmelCase =checkpoint["""time_embed.2.weight"""] _lowerCAmelCase =checkpoint["""time_embed.2.bias"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""] _lowerCAmelCase =checkpoint["""out.0.weight"""] _lowerCAmelCase =checkpoint["""out.0.bias"""] _lowerCAmelCase =checkpoint["""out.2.weight"""] _lowerCAmelCase =checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the middle blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the output blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } for i in range(1 , __UpperCamelCase ): _lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: _lowerCAmelCase =checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] _lowerCAmelCase =checkpoint[ 
F'''input_blocks.{i}.0.op.bias''' ] continue _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} _lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase ) if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''input_blocks.{i}.1''', """new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''input_blocks.{i}.1.qkv.bias''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , ) _lowerCAmelCase =middle_blocks[0] _lowerCAmelCase =middle_blocks[1] _lowerCAmelCase =middle_blocks[2] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase ) for i in range(__UpperCamelCase ): _lowerCAmelCase =i // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =i % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]] _lowerCAmelCase ={} for layer in output_block_layers: _lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__UpperCamelCase ) else: _lowerCAmelCase =[layer_name] if len(__UpperCamelCase ) > 1: _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(__UpperCamelCase ) == 2: _lowerCAmelCase =[] if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''output_blocks.{i}.1''', """new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''output_blocks.{i}.1.qkv.bias''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , ) else: _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] ) _lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] ) _lowerCAmelCase =checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') __A = parser.parse_args() __A = torch.load(args.checkpoint_path) with open(args.config_file) as f: __A = json.loads(f.read()) __A = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __A = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) __A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) __A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: __A = None __A = logging.get_logger(__name__) __A = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} __A = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } __A = { 'facebook/nllb-large-en-ro': 1024, 'facebook/nllb-200-distilled-600M': 1024, } # fmt: off __A = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class lowerCamelCase__ ( snake_case_ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES 
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = ['''input_ids''', '''attention_mask'''] lowerCamelCase = NllbTokenizer lowerCamelCase = [] lowerCamelCase = [] def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Optional[Any]: _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase =legacy_behaviour super().__init__( vocab_file=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , legacy_behaviour=__UpperCAmelCase , **__UpperCAmelCase , ) _lowerCAmelCase =vocab_file _lowerCAmelCase =False if not self.vocab_file else True _lowerCAmelCase =FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} ) _lowerCAmelCase ={ lang_code: self.convert_tokens_to_ids(__UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _lowerCAmelCase =src_lang if src_lang is not None else 'eng_Latn' _lowerCAmelCase =self.convert_tokens_to_ids(self._src_lang ) _lowerCAmelCase =tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _lowerCAmelCase ( self ) -> str: return self._src_lang @src_lang.setter def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None: _lowerCAmelCase =new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> str: if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) _lowerCAmelCase =src_lang _lowerCAmelCase =self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) _lowerCAmelCase =self.convert_tokens_to_ids(__UpperCAmelCase ) _lowerCAmelCase =tgt_lang_id return inputs def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = "eng_Latn" , __UpperCAmelCase = None , __UpperCAmelCase = "fra_Latn" , **__UpperCAmelCase , ) -> BatchEncoding: _lowerCAmelCase 
=src_lang _lowerCAmelCase =tgt_lang return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: return self.set_src_lang_special_tokens(self.src_lang ) def _lowerCAmelCase ( self ) -> List[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None: _lowerCAmelCase =self.convert_tokens_to_ids(__UpperCAmelCase ) if self.legacy_behaviour: _lowerCAmelCase =[] _lowerCAmelCase =[self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase =[self.cur_lang_code] _lowerCAmelCase =[self.eos_token_id] _lowerCAmelCase =self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase =self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase =processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None: _lowerCAmelCase =self.convert_tokens_to_ids(__UpperCAmelCase ) if self.legacy_behaviour: _lowerCAmelCase =[] _lowerCAmelCase =[self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase =[self.cur_lang_code] _lowerCAmelCase =[self.eos_token_id] _lowerCAmelCase =self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase =self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase =processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
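# A brief usage sketch of the tokenizer above. It assumes the
# facebook/nllb-200-distilled-600M files referenced in the constants are reachable
# (network access to the Hub); the sample sentence is our own.
#
#   from transformers import NllbTokenizerFast
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn"
#   )
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   # For generation into French, pass the target language code:
#   #   forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"]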
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =0 _lowerCAmelCase =len(__UpperCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , __UpperCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: if len(__UpperCamelCase ) <= 1: return arr, 0 _lowerCAmelCase =len(__UpperCamelCase ) // 2 _lowerCAmelCase =arr[0:mid] _lowerCAmelCase =arr[mid:] _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =inversion_p + inversions_q + cross_inversions return c, num_inversions def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: _lowerCAmelCase =[] _lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0 while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__UpperCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__UpperCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _lowerCamelCase() -> str: _lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , __UpperCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) # an empty list should also have zero inversions _lowerCAmelCase =[] _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) if __name__ == "__main__": main()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["MBartTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["MBartTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "MBART_PRETRAINED_MODEL_ARCHIVE_LIST", "MBartForCausalLM", "MBartForConditionalGeneration", "MBartForQuestionAnswering", "MBartForSequenceClassification", "MBartModel", "MBartPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "TFMBartForConditionalGeneration", "TFMBartModel", "TFMBartPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "FlaxMBartForConditionalGeneration", "FlaxMBartForQuestionAnswering", "FlaxMBartForSequenceClassification", "FlaxMBartModel", "FlaxMBartPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = True lowerCamelCase = None lowerCamelCase = 1 lowerCamelCase = None lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None def _lowerCAmelCase ( self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int: return int((input_a, input_a).count(1 ) != 0 ) def _lowerCamelCase() -> None: assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule __A = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py __A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' __A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' __A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple: _lowerCAmelCase =compute_bleu( reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency __A = { 'E': 12.70, 'T': 9.06, 'A': 8.17, 'O': 7.51, 'I': 6.97, 'N': 6.75, 'S': 6.33, 'H': 6.09, 'R': 5.99, 'D': 4.25, 'L': 4.03, 'C': 2.78, 'U': 2.76, 'M': 2.41, 'W': 2.36, 'F': 2.23, 'G': 2.02, 'Y': 1.97, 'P': 1.93, 'B': 1.29, 'V': 0.98, 'K': 0.77, 'J': 0.15, 'X': 0.15, 'Q': 0.10, 'Z': 0.07, } __A = 'ETAOINSHRDLCUMWFGYPBVKJXQZ' __A = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def _lowerCamelCase(__UpperCamelCase ) -> dict[str, int]: _lowerCAmelCase ={letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def _lowerCamelCase(__UpperCamelCase ) -> str: return x[0] def _lowerCamelCase(__UpperCamelCase ) -> str: _lowerCAmelCase =get_letter_count(a__ ) _lowerCAmelCase ={ freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(a__ ) _lowerCAmelCase ={} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=a__ ) _lowerCAmelCase ="""""".join(freq_to_letter[freq] ) _lowerCAmelCase =list(freq_to_letter_str.items() ) freq_pairs.sort(key=a__ , reverse=a__ ) _lowerCAmelCase =[freq_pair[1] for freq_pair in freq_pairs] return "".join(a__ ) def _lowerCamelCase(__UpperCamelCase ) -> int: _lowerCAmelCase =get_frequency_order(a__ ) _lowerCAmelCase =0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=512, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def _lowerCamelCase(__UpperCamelCase ) -> List[str]: if string == "True": return True elif string == "False": return False else: raise ValueError(F'''could not parse string as bool {string}''' ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) __A = parser.parse_args() __A = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
"""simple docstring""" import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# __A = [ # (stable-diffusion, HF Diffusers) ('time_embed.0.weight', 'time_embedding.linear_1.weight'), ('time_embed.0.bias', 'time_embedding.linear_1.bias'), ('time_embed.2.weight', 'time_embedding.linear_2.weight'), ('time_embed.2.bias', 'time_embedding.linear_2.bias'), ('input_blocks.0.0.weight', 'conv_in.weight'), ('input_blocks.0.0.bias', 'conv_in.bias'), ('out.0.weight', 'conv_norm_out.weight'), ('out.0.bias', 'conv_norm_out.bias'), ('out.2.weight', 'conv_out.weight'), ('out.2.bias', 'conv_out.bias'), ] __A = [ # (stable-diffusion, HF Diffusers) ('in_layers.0', 'norm1'), ('in_layers.2', 'conv1'), ('out_layers.0', 'norm2'), ('out_layers.3', 'conv2'), ('emb_layers.1', 'time_emb_proj'), ('skip_connection', 'conv_shortcut'), ] __A = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks __A = F"""down_blocks.{i}.resnets.{j}.""" __A = F"""input_blocks.{3*i + j + 1}.0.""" unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 __A = F"""down_blocks.{i}.attentions.{j}.""" __A = F"""input_blocks.{3*i + j + 1}.1.""" unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks __A = F"""up_blocks.{i}.resnets.{j}.""" __A = F"""output_blocks.{3*i + j}.0.""" unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 __A = F"""up_blocks.{i}.attentions.{j}.""" __A = F"""output_blocks.{3*i + j}.1.""" unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 __A = F"""down_blocks.{i}.downsamplers.0.conv.""" __A = F"""input_blocks.{3*(i+1)}.0.op.""" unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 __A = F"""up_blocks.{i}.upsamplers.0.""" __A = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}.""" unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) __A = 'mid_block.attentions.0.' __A = 'middle_block.1.' unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): __A = F"""mid_block.resnets.{j}.""" __A = F"""middle_block.{2*j}.""" unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: # buyer beware: this is a *brittle* function, # and correct output requires that all of these pieces interact in # the exact order in which I have arranged them. 
_lowerCAmelCase ={k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: _lowerCAmelCase =sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: _lowerCAmelCase =v.replace(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: _lowerCAmelCase =v.replace(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =v _lowerCAmelCase ={v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# __A = [ # (stable-diffusion, HF Diffusers) ('nin_shortcut', 'conv_shortcut'), ('norm_out', 'conv_norm_out'), ('mid.attn_1.', 'mid_block.attentions.0.'), ] for i in range(4): # down_blocks have two resnets for j in range(2): __A = F"""encoder.down_blocks.{i}.resnets.{j}.""" __A = F"""encoder.down.{i}.block.{j}.""" vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: __A = F"""down_blocks.{i}.downsamplers.0.""" __A = F"""down.{i}.downsample.""" vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) __A = F"""up_blocks.{i}.upsamplers.0.""" __A = F"""up.{3-i}.upsample.""" vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): __A = F"""decoder.up_blocks.{i}.resnets.{j}.""" __A = F"""decoder.up.{3-i}.block.{j}.""" vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): __A = F"""mid_block.resnets.{i}.""" __A = F"""mid.block_{i+1}.""" vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) __A = [ # (stable-diffusion, HF Diffusers) ('norm.', 'group_norm.'), ('q.', 'query.'), ('k.', 'key.'), ('v.', 'value.'), ('proj_out.', 'proj_attn.'), ] def _lowerCamelCase(__UpperCamelCase ) -> int: # convert HF linear weights to SD conv2d weights return w.reshape(*w.shape , 1 , 1 ) def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]: _lowerCAmelCase ={k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: _lowerCAmelCase =v.replace(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: _lowerCAmelCase =v.replace(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =v _lowerCAmelCase ={v: vae_state_dict[k] for k, v in mapping.items()} _lowerCAmelCase =["""q""", """k""", """v""", """proj_out"""] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if F'''mid.attn_1.{weight_name}.weight''' in k: print(F'''Reshaping {k} for SD format''' ) _lowerCAmelCase =reshape_weight_for_sd(__UpperCamelCase ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# __A = [ # (stable-diffusion, HF Diffusers) ('resblocks.', 'text_model.encoder.layers.'), ('ln_1', 'layer_norm1'), ('ln_2', 'layer_norm2'), ('.c_fc.', '.fc1.'), ('.c_proj.', '.fc2.'), ('.attn', '.self_attn'), ('ln_final.', 'transformer.text_model.final_layer_norm.'), ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'), ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'), ] __A = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} __A = re.compile('|'.join(protected.keys())) # Ordering is from 
https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp __A = {'q': 0, 'k': 1, 'v': 2} def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase ={} _lowerCAmelCase ={} _lowerCAmelCase ={} for k, v in text_enc_dict.items(): if ( k.endswith(""".self_attn.q_proj.weight""" ) or k.endswith(""".self_attn.k_proj.weight""" ) or k.endswith(""".self_attn.v_proj.weight""" ) ): _lowerCAmelCase =k[: -len(""".q_proj.weight""" )] _lowerCAmelCase =k[-len("""q_proj.weight""" )] if k_pre not in capture_qkv_weight: _lowerCAmelCase =[None, None, None] _lowerCAmelCase =v continue if ( k.endswith(""".self_attn.q_proj.bias""" ) or k.endswith(""".self_attn.k_proj.bias""" ) or k.endswith(""".self_attn.v_proj.bias""" ) ): _lowerCAmelCase =k[: -len(""".q_proj.bias""" )] _lowerCAmelCase =k[-len("""q_proj.bias""" )] if k_pre not in capture_qkv_bias: _lowerCAmelCase =[None, None, None] _lowerCAmelCase =v continue _lowerCAmelCase =textenc_pattern.sub(lambda __UpperCamelCase : protected[re.escape(m.group(0 ) )] , __UpperCamelCase ) _lowerCAmelCase =v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) _lowerCAmelCase =textenc_pattern.sub(lambda __UpperCamelCase : protected[re.escape(m.group(0 ) )] , __UpperCamelCase ) _lowerCAmelCase =torch.cat(__UpperCamelCase ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) _lowerCAmelCase =textenc_pattern.sub(lambda __UpperCamelCase : protected[re.escape(m.group(0 ) )] , __UpperCamelCase ) _lowerCAmelCase =torch.cat(__UpperCamelCase ) return new_state_dict def _lowerCamelCase(__UpperCamelCase ) -> int: return text_enc_dict if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.' ) __A = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" # Path for safetensors __A = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors') __A = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors') __A = osp.join(args.model_path, 'text_encoder', 'model.safetensors') # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): __A = load_file(unet_path, device='cpu') else: __A = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin') __A = torch.load(unet_path, map_location='cpu') if osp.exists(vae_path): __A = load_file(vae_path, device='cpu') else: __A = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin') __A = torch.load(vae_path, map_location='cpu') if osp.exists(text_enc_path): __A = load_file(text_enc_path, device='cpu') else: __A = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin') __A = torch.load(text_enc_path, map_location='cpu') # Convert the UNet model __A = convert_unet_state_dict(unet_state_dict) __A = {'model.diffusion_model.' 
+ k: v for k, v in unet_state_dict.items()} # Convert the VAE model __A = convert_vae_state_dict(vae_state_dict) __A = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper __A = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm __A = {'transformer.' + k: v for k, v in text_enc_dict.items()} __A = convert_text_enc_state_dict_vaa(text_enc_dict) __A = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()} else: __A = convert_text_enc_state_dict(text_enc_dict) __A = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint __A = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: __A = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: __A = {'state_dict': state_dict} torch.save(state_dict, args.checkpoint_path)
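# The reshape in reshape_weight_for_sd above turns each 2-D attention projection
# weight from the Diffusers VAE (a linear layer) into the 4-D 1x1 convolution
# kernel that the original Stable Diffusion checkpoint layout expects. A minimal
# illustration of that shape change on a toy tensor (dimensions are arbitrary):
import torch

w = torch.randn(512, 512)  # HF Diffusers linear weight: (out_features, in_features)
w_sd = w.reshape(*w.shape, 1, 1)  # SD conv2d weight: (out, in, 1, 1)
assert w_sd.shape == (512, 512, 1, 1)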
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __A = { 'configuration_audio_spectrogram_transformer': [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ASTConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ASTForAudioClassification', 'ASTModel', 'ASTPreTrainedModel', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['ASTFeatureExtractor'] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> str: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError("""iterations must be defined as integers""" ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not number >= 1: raise ValueError( """starting number must be and integer and be more than 0""" ) if not iterations >= 1: raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" ) _lowerCAmelCase ="""""" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__lowerCAmelCase ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { 'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'], 'tokenization_m2m_100': ['M2M100Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST', 'M2M100ForConditionalGeneration', 'M2M100Model', 'M2M100PreTrainedModel', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> List[str]: return "".join(chr(ord(lowercase__ ) - 32 ) if """a""" <= char <= """z""" else char for char in word ) if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __A = datasets.logging.get_logger(__name__) __A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' __A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' __A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict: _lowerCAmelCase ={doc: key_lines} _lowerCAmelCase ={doc: sys_lines} _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) if remove_nested: _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' 
"""files, respectively""" ) return doc_coref_infos def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: _lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 for name, metric in metrics: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _lowerCAmelCase =(conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''' ) output_scores.update({"""conll_score""": conll} ) return output_scores def _lowerCamelCase(__UpperCamelCase ) -> Tuple: _lowerCAmelCase =False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: _lowerCAmelCase =line.split()[5] if not parse_col == "-": _lowerCAmelCase =True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]: _lowerCAmelCase =[ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _lowerCAmelCase =evaluate( key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , ) return score
"""simple docstring""" import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _lowerCamelCase(__UpperCamelCase ) -> int: _lowerCAmelCase =filter(lambda __UpperCamelCase : p.requires_grad , model.parameters() ) _lowerCAmelCase =sum([np.prod(p.size() ) for p in model_parameters] ) return params __A = logging.getLogger(__name__) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[int]: if metric == "rouge2": _lowerCAmelCase ="{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": _lowerCAmelCase ="{val_avg_bleu:.4f}-{step_count}" elif metric == "em": _lowerCAmelCase ="{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' """ function.""" ) _lowerCAmelCase =ModelCheckpoint( dirpath=__snake_case , filename=__snake_case , monitor=F'''val_{metric}''' , mode="""max""" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Tuple: return EarlyStopping( monitor=F'''val_{metric}''' , mode="""min""" if """loss""" in metric else """max""" , patience=__snake_case , verbose=__snake_case , ) class lowerCamelCase__ ( pl.Callback ): '''simple docstring''' def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: _lowerCAmelCase ={f'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_SCREAMING_SNAKE_CASE ) @rank_zero_only def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True ) -> None: logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) _lowerCAmelCase =trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} ) # Log results _lowerCAmelCase =Path(pl_module.hparams.output_dir ) if type_path == "test": _lowerCAmelCase =od / "test_results.txt" _lowerCAmelCase =od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_lowerCAmelCase =od / f'''{type_path}_results/{trainer.global_step:05d}.txt''' _lowerCAmelCase =od / f'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) generations_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) with open(_SCREAMING_SNAKE_CASE , """a+""" ) as writer: for key in sorted(_SCREAMING_SNAKE_CASE ): if key in ["log", "progress_bar", "preds"]: continue _lowerCAmelCase =metrics[key] if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _lowerCAmelCase =val.item() _lowerCAmelCase =f'''{key}: {val:.6f}\n''' writer.write(_SCREAMING_SNAKE_CASE ) if not save_generations: return if "preds" in metrics: _lowerCAmelCase ="\n".join(metrics["""preds"""] ) generations_file.open("""w+""" ).write(_SCREAMING_SNAKE_CASE ) @rank_zero_only def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: try: _lowerCAmelCase =pl_module.model.model.num_parameters() except AttributeError: _lowerCAmelCase =pl_module.model.num_parameters() _lowerCAmelCase =count_trainable_parameters(_SCREAMING_SNAKE_CASE ) # mp stands for million parameters trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} ) @rank_zero_only def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """test""" ) @rank_zero_only def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
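# The trainable-parameter count used above (sum of np.prod(p.size()) over
# parameters with requires_grad) is equivalent to this minimal check on a toy
# module:
import torch

toy = torch.nn.Linear(4, 2)  # 4 * 2 weights + 2 biases = 10 trainable parameters
n_trainable = sum(p.numel() for p in toy.parameters() if p.requires_grad)
assert n_trainable == 10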
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = XGLMConfig lowerCamelCase = {} lowerCamelCase = '''gelu''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_input_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =ffn_dim _lowerCAmelCase =activation_function _lowerCAmelCase =activation_dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =initializer_range _lowerCAmelCase =None _lowerCAmelCase =0 _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Dict: return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _lowerCAmelCase =None if self.use_input_mask: _lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase =self.get_config() _lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self ) -> str: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) =config_and_inputs _lowerCAmelCase ={ """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': 
TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =TFXGLMModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 ) def _lowerCAmelCase ( self ) -> int: self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def _lowerCAmelCase ( self ) -> Union[str, Any]: super().test_resize_token_embeddings() @require_tf class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) _lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) _lowerCAmelCase =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] ) _lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase ="""left""" # use different length sentences to test batching _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase ) _lowerCAmelCase =inputs["""input_ids"""] _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json' ), } class lowerCamelCase__ ( __snake_case ): '''simple docstring''' lowerCamelCase = "xlm-roberta" def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> str: super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ ) _lowerCAmelCase =vocab_size _lowerCAmelCase =hidden_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =hidden_act _lowerCAmelCase =intermediate_size _lowerCAmelCase =hidden_dropout_prob _lowerCAmelCase =attention_probs_dropout_prob _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =type_vocab_size _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps _lowerCAmelCase =position_embedding_type _lowerCAmelCase =use_cache _lowerCAmelCase =classifier_dropout class lowerCamelCase__ ( __snake_case ): '''simple docstring''' @property def _lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _lowerCAmelCase ={0: """batch""", 1: """choice""", 2: """sequence"""} else: _lowerCAmelCase ={0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A = logging.get_logger(__name__) __A = {'vocab_file': 'spiece.model'} __A = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } __A = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) __A = 0 __A = 1 __A = 2 __A = 3 __A = 4 class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = '''left''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =3 _lowerCAmelCase =do_lower_case _lowerCAmelCase =remove_space _lowerCAmelCase =keep_accents _lowerCAmelCase =vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> str: return len(self.sp_model ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[int]: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None return state def __setstate__( self , __UpperCAmelCase ) -> Tuple: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: if self.remove_space: _lowerCAmelCase =""" """.join(inputs.strip().split() ) else: _lowerCAmelCase =inputs _lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase ) _lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] ) if self.do_lower_case: _lowerCAmelCase =outputs.lower() return outputs def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: 
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase ) _lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) _lowerCAmelCase =[] for piece in pieces: if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCAmelCase =cur_pieces[1:] else: _lowerCAmelCase =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__UpperCAmelCase ) else: new_pieces.append(__UpperCAmelCase ) return new_pieces def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: return self.sp_model.PieceToId(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.IdToPiece(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str: _lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase ) _lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 _lowerCAmelCase =[] _lowerCAmelCase =[] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) _lowerCAmelCase =[] sub_texts.append(__UpperCAmelCase ) else: current_sub_text.append(__UpperCAmelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens _lowerCAmelCase ="""""".join(__UpperCAmelCase ) _lowerCAmelCase =( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase ) return clean_text else: return text def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] return ([0] * len(__UpperCAmelCase )) + [1, 1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowerCAmelCase ( self , __UpperCAmelCase , 
__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
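# Layout produced by the sequence-pair helpers above: unlike BERT, XLNet appends
# its special tokens at the end of the sequence and gives <cls> its own segment:
#
#   tokens:         A ...  <sep>  B ...  <sep>  <cls>
#   token type ids: 0 ...    0    1 ...    1      2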
"""simple docstring""" # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=0 ) -> Optional[Any]: if name is None: _lowerCAmelCase =None else: _lowerCAmelCase =""".""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}""" _lowerCAmelCase =fmt.format(SCREAMING_SNAKE_CASE_ ) # Print and recurse (if needed). 
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if msg is not None: print(SCREAMING_SNAKE_CASE_ ) for k in val.keys(): recursive_print(SCREAMING_SNAKE_CASE_ , val[k] , spaces + 2 ) elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): print(SCREAMING_SNAKE_CASE_ , """:""" , val.size() ) else: print(SCREAMING_SNAKE_CASE_ , """:""" , SCREAMING_SNAKE_CASE_ ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any: _lowerCAmelCase =param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] _lowerCAmelCase =(num_heads, hidden_size, num_splits) + input_shape[1:] _lowerCAmelCase =param.view(*SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =param.transpose(0 , 2 ) _lowerCAmelCase =param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] _lowerCAmelCase =(num_heads, num_splits, hidden_size) + input_shape[1:] _lowerCAmelCase =param.view(*SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =param.transpose(0 , 1 ).contiguous() _lowerCAmelCase =param.view(*SCREAMING_SNAKE_CASE_ ) return param def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase ={} # old versions did not store training args _lowerCAmelCase =input_state_dict.get("""args""" , SCREAMING_SNAKE_CASE_ ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) _lowerCAmelCase =ds_args.padded_vocab_size _lowerCAmelCase =ds_args.max_position_embeddings _lowerCAmelCase =ds_args.hidden_size _lowerCAmelCase =ds_args.num_layers _lowerCAmelCase =ds_args.num_attention_heads _lowerCAmelCase =ds_args.ffn_hidden_size # pprint(config) # The number of heads. _lowerCAmelCase =config.n_head # The hidden_size per head. _lowerCAmelCase =config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): _lowerCAmelCase =input_state_dict["""checkpoint_version"""] else: _lowerCAmelCase =0.0 # The model. _lowerCAmelCase =input_state_dict["""model"""] # The language model. _lowerCAmelCase =model["""language_model"""] # The embeddings. _lowerCAmelCase =lm["""embedding"""] # The word embeddings. _lowerCAmelCase =embeddings["""word_embeddings"""]["""weight"""] # Truncate the embedding table to vocab_size rows. _lowerCAmelCase =word_embeddings[: config.vocab_size, :] _lowerCAmelCase =word_embeddings # The position embeddings. _lowerCAmelCase =embeddings["""position_embeddings"""]["""weight"""] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] _lowerCAmelCase =pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' ) # Store the position embeddings. _lowerCAmelCase =pos_embeddings # The transformer. _lowerCAmelCase =lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""] # The regex to extract layer names. _lowerCAmelCase =re.compile(R"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" ) # The simple map of names for "automated" rules. _lowerCAmelCase ={ """attention.dense""": """.attn.c_proj.""", """self_attention.dense""": """.attn.c_proj.""", """mlp.dense_h_to_4h""": """.mlp.c_fc.""", """mlp.dense_4h_to_h""": """.mlp.c_proj.""", } # Extract the layers. 
for key, val in transformer.items(): # Match the name. _lowerCAmelCase =layer_re.match(SCREAMING_SNAKE_CASE_ ) # Stop if that's not a layer if m is None: break # The index of the layer. _lowerCAmelCase =int(m.group(1 ) ) # The name of the operation. _lowerCAmelCase =m.group(2 ) # Is it a weight or a bias? _lowerCAmelCase =m.group(3 ) # The name of the layer. _lowerCAmelCase =F'''transformer.h.{layer_idx}''' # For layernorm(s), simply store the layer norm. if op_name.endswith("""layernorm""" ): _lowerCAmelCase ="""ln_1""" if op_name.startswith("""input""" ) else """ln_2""" _lowerCAmelCase =val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. _lowerCAmelCase =torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =causal_mask # Insert a "dummy" tensor for masked_bias. _lowerCAmelCase =torch.tensor(-1E4 , dtype=torch.floataa ) _lowerCAmelCase =masked_bias _lowerCAmelCase =fix_query_key_value_ordering(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. _lowerCAmelCase =out_val.transpose(0 , 1 ).contiguous() # Store. _lowerCAmelCase =out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": _lowerCAmelCase =fix_query_key_value_ordering(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Store. No change of shape. _lowerCAmelCase =out_val # Transpose the weights. elif weight_or_bias == "weight": _lowerCAmelCase =megatron_to_transformers[op_name] _lowerCAmelCase =val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": _lowerCAmelCase =megatron_to_transformers[op_name] _lowerCAmelCase =val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. _lowerCAmelCase =transformer["""final_layernorm.weight"""] _lowerCAmelCase =transformer["""final_layernorm.bias"""] # For LM head, transformers' wants the matrix to weight embeddings. _lowerCAmelCase =word_embeddings # It should be done! return output_state_dict def _lowerCamelCase() -> Any: _lowerCAmelCase =argparse.ArgumentParser() parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" ) parser.add_argument( """path_to_checkpoint""" , type=SCREAMING_SNAKE_CASE_ , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , ) parser.add_argument( """--config_file""" , default="""""" , type=SCREAMING_SNAKE_CASE_ , help="""An optional config json file describing the pre-trained model.""" , ) _lowerCAmelCase =parser.parse_args() # Extract the basename. _lowerCAmelCase =os.path.dirname(args.path_to_checkpoint ) # Load the model. 
# the .zip is very optional, let's keep it for backward compatibility print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' ) if args.path_to_checkpoint.endswith(""".zip""" ): with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint: with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict: _lowerCAmelCase =torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" ) else: _lowerCAmelCase =torch.load(args.path_to_checkpoint , map_location="""cpu""" ) _lowerCAmelCase =input_state_dict.get("""args""" , SCREAMING_SNAKE_CASE_ ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: _lowerCAmelCase ="""gelu_fast""" elif ds_args.openai_gelu: _lowerCAmelCase ="""gelu_new""" else: _lowerCAmelCase ="""gelu""" else: # in the very early days this used to be "gelu_new" _lowerCAmelCase ="""gelu_new""" # Spell out all parameters in case the defaults change. _lowerCAmelCase =GPTaConfig( vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=SCREAMING_SNAKE_CASE_ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=SCREAMING_SNAKE_CASE_ , summary_activation=SCREAMING_SNAKE_CASE_ , summary_proj_to_labels=SCREAMING_SNAKE_CASE_ , summary_first_dropout=0.1 , scale_attn_weights=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=50256 , eos_token_id=50256 , ) else: _lowerCAmelCase =GPTaConfig.from_json_file(args.config_file ) _lowerCAmelCase =["""GPT2LMHeadModel"""] # Convert. print("""Converting""" ) _lowerCAmelCase =convert_megatron_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: _lowerCAmelCase =ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": _lowerCAmelCase ="""gpt2""" elif tokenizer_type == "PretrainedFromHF": _lowerCAmelCase =ds_args.tokenizer_name_or_path else: raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' ) else: _lowerCAmelCase ="""gpt2""" _lowerCAmelCase =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =type(SCREAMING_SNAKE_CASE_ ).__name__ _lowerCAmelCase =tokenizer_class # Store the config to file. print("""Saving config""" ) config.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Save tokenizer based on args print(F'''Adding {tokenizer_class} tokenizer files''' ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Store the state_dict to file. _lowerCAmelCase =os.path.join(SCREAMING_SNAKE_CASE_ , """pytorch_model.bin""" ) print(F'''Saving checkpoint to \"{output_checkpoint_file}\"''' ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
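# A toy illustration of the checkpoint_version >= 2.0 branch of the QKV
# reordering above: Megatron stores fused QKV rows grouped by head
# ([heads x (q, k, v) x head_size]) while the converted GPT-2 layout groups them
# by split ([(q, k, v) x heads x head_size]). Dimensions here are arbitrary.
import torch

num_heads, num_splits, head_size, in_dim = 2, 3, 4, 5
param = torch.randn(num_heads * num_splits * head_size, in_dim)
shaped = param.view(num_heads, num_splits, head_size, in_dim)
reordered = shaped.transpose(0, 1).contiguous().view(param.size())
assert reordered.shape == param.shape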
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase ) -> bool: _lowerCAmelCase =str(__UpperCamelCase ) return n == n[::-1] def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> str: _lowerCAmelCase =0 for i in range(1 , __UpperCamelCase ): if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
"""simple docstring""" import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ) -> Dict: if attention_mask is None: _lowerCAmelCase =input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _lowerCAmelCase =decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _lowerCAmelCase =torch.ones(config.encoder_layers , config.encoder_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if decoder_head_mask is None: _lowerCAmelCase =torch.ones(config.decoder_layers , config.decoder_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if cross_attn_head_mask is None: _lowerCAmelCase =torch.ones(config.decoder_layers , config.decoder_attention_heads , device=SCREAMING_SNAKE_CASE__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="relu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , ) -> List[str]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =hidden_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =intermediate_size _lowerCAmelCase =hidden_act _lowerCAmelCase =hidden_dropout_prob _lowerCAmelCase =attention_probs_dropout_prob _lowerCAmelCase =encoder_layerdrop _lowerCAmelCase =decoder_layerdrop _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =eos_token_id _lowerCAmelCase =pad_token_id _lowerCAmelCase =bos_token_id def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase =self.eos_token_id # Eos Token _lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in 
them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _lowerCAmelCase =input_ids.clamp(self.pad_token_id + 1 ) _lowerCAmelCase =decoder_input_ids.clamp(self.pad_token_id + 1 ) _lowerCAmelCase =self.get_config() _lowerCAmelCase =prepare_mam_aaa_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def _lowerCAmelCase ( self ) -> Tuple: return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase , _lowerCAmelCase =self.prepare_config_and_inputs() return config, inputs_dict def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =MaMaaaModel(config=_UpperCAmelCase ).get_decoder().to(_UpperCAmelCase ).eval() _lowerCAmelCase =inputs_dict["""input_ids"""] _lowerCAmelCase =inputs_dict["""attention_mask"""] _lowerCAmelCase =inputs_dict["""head_mask"""] # first forward pass _lowerCAmelCase =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase =outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids _lowerCAmelCase =ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCAmelCase =ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and _lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase =torch.cat([attention_mask, next_attn_mask] , dim=-1 ) _lowerCAmelCase =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )["""last_hidden_state"""] _lowerCAmelCase =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[ """last_hidden_state""" ] # select random slice _lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase =output_from_no_past[:, -3:, random_slice_idx].detach() _lowerCAmelCase =output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-2 ) ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int: _lowerCAmelCase =MaMaaaModel(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval() _lowerCAmelCase =model(**_UpperCAmelCase ) _lowerCAmelCase =outputs.encoder_last_hidden_state _lowerCAmelCase =outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase =model.get_encoder() encoder.save_pretrained(_UpperCAmelCase ) _lowerCAmelCase =MaMaaaEncoder.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase ) _lowerCAmelCase =encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) with 
tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase =model.get_decoder() decoder.save_pretrained(_UpperCAmelCase ) _lowerCAmelCase =MaMaaaDecoder.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase ) _lowerCAmelCase =decoder( input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class lowerCamelCase__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) lowerCamelCase = (MaMaaaForConditionalGeneration,) if is_torch_available() else () lowerCamelCase = ( { """conversational""": MaMaaaForConditionalGeneration, """feature-extraction""": MaMaaaModel, """summarization""": MaMaaaForConditionalGeneration, """text2text-generation""": MaMaaaForConditionalGeneration, """translation""": MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) lowerCamelCase = True lowerCamelCase = True lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =MaMaaaModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=_UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[int]: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _lowerCAmelCase =model_class(_UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase =model_class.from_pretrained(_UpperCAmelCase , output_loading_info=_UpperCAmelCase ) self.assertEqual(info["""missing_keys"""] , [] ) def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*_UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*_UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): _lowerCAmelCase =model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase =copy.deepcopy(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) if not self.is_encoder_decoder: _lowerCAmelCase =inputs["""input_ids"""] del inputs["input_ids"] else: _lowerCAmelCase =inputs["""input_ids"""] _lowerCAmelCase =inputs.get("""decoder_input_ids""" , _UpperCAmelCase ) del inputs["input_ids"] inputs.pop("""decoder_input_ids""" , _UpperCAmelCase ) _lowerCAmelCase =model.get_input_embeddings() if not self.is_encoder_decoder: _lowerCAmelCase =wte(_UpperCAmelCase ) else: 
_lowerCAmelCase =wte(_UpperCAmelCase ) _lowerCAmelCase =wte(_UpperCAmelCase ) with torch.no_grad(): model(**_UpperCAmelCase )[0] def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() _lowerCAmelCase =input_dict["""input_ids"""] _lowerCAmelCase =input_ids.ne(1 ).to(_UpperCAmelCase ) _lowerCAmelCase =MaMaaaForConditionalGeneration(_UpperCAmelCase ).eval().to(_UpperCAmelCase ) if torch_device == "cuda": model.half() model.generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase ) model.generate(num_beams=4 , do_sample=_UpperCAmelCase , early_stopping=_UpperCAmelCase , num_return_sequences=3 ) def _lowerCamelCase(__UpperCamelCase ) -> Tuple: return torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) __A = 1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase ( self ) -> Tuple: return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(_UpperCAmelCase ) _lowerCAmelCase =_long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] ) _lowerCAmelCase =_long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] ) _lowerCAmelCase =prepare_mam_aaa_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase ) with torch.no_grad(): _lowerCAmelCase =model(**_UpperCAmelCase )[0] _lowerCAmelCase =torch.Size((1, 11, 10_24) ) self.assertEqual(output.shape , _UpperCAmelCase ) # change to expected output here _lowerCAmelCase =torch.tensor( [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=_UpperCAmelCase ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(_UpperCAmelCase ) # change to intended input _lowerCAmelCase =_long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] ) _lowerCAmelCase =_long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] ) _lowerCAmelCase =prepare_mam_aaa_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase ) with torch.no_grad(): _lowerCAmelCase =model(**_UpperCAmelCase )[0] _lowerCAmelCase =torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , _UpperCAmelCase ) # change to expected output here _lowerCAmelCase =torch.tensor( [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=_UpperCAmelCase ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(_UpperCAmelCase ) _lowerCAmelCase =MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" ) _lowerCAmelCase =[ """L'affaire NSA souligne l'absence totale de débat sur le renseignement""", """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""", """Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent""" """ Fabius convoque l'ambassadeur 
des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de""" """ l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""", ] # The below article tests that we don't add any hypotheses outside of the top n_beams _lowerCAmelCase =tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors="""pt""" ) _lowerCAmelCase =model.generate( input_ids=dct["""input_ids"""].to(_UpperCAmelCase ) , attention_mask=dct["""attention_mask"""].to(_UpperCAmelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , ) _lowerCAmelCase =[ """The NSA case highlights the total absence of intelligence debate""", """I think there are two levels of response from the French government.""", """When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.""" """ Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all""" """ communications in France.""", ] _lowerCAmelCase =tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) assert generated == expected_en
367
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''llama''' lowerCamelCase = ['''past_key_values'''] def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: _lowerCAmelCase =vocab_size _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =hidden_size _lowerCAmelCase =intermediate_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads # for backward compatibility if num_key_value_heads is None: _lowerCAmelCase =num_attention_heads _lowerCAmelCase =num_key_value_heads _lowerCAmelCase =hidden_act _lowerCAmelCase =initializer_range _lowerCAmelCase =rms_norm_eps _lowerCAmelCase =pretraining_tp _lowerCAmelCase =use_cache _lowerCAmelCase =rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> str: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'''got {self.rope_scaling}''' ) _lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase ) _lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
341
0
"""simple docstring""" from __future__ import annotations from collections import deque class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> str: _lowerCAmelCase =[] self.adlist.append( {"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} ) for keyword in keywords: self.add_keyword(__lowerCamelCase ) self.set_fail_transitions() def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: _lowerCAmelCase =0 for character in keyword: _lowerCAmelCase =self.find_next_state(__lowerCamelCase , __lowerCamelCase ) if next_state is None: self.adlist.append( { """value""": character, """next_states""": [], """fail_state""": 0, """output""": [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) _lowerCAmelCase =len(self.adlist ) - 1 else: _lowerCAmelCase =next_state self.adlist[current_state]["output"].append(__lowerCamelCase ) def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =deque() for node in self.adlist[0]["next_states"]: q.append(__lowerCamelCase ) _lowerCAmelCase =0 while q: _lowerCAmelCase =q.popleft() for child in self.adlist[r]["next_states"]: q.append(__lowerCamelCase ) _lowerCAmelCase =self.adlist[r]["""fail_state"""] while ( self.find_next_state(__lowerCamelCase , self.adlist[child]["""value"""] ) is None and state != 0 ): _lowerCAmelCase =self.adlist[state]["""fail_state"""] _lowerCAmelCase =self.find_next_state( __lowerCamelCase , self.adlist[child]["""value"""] ) if self.adlist[child]["fail_state"] is None: _lowerCAmelCase =0 _lowerCAmelCase =( self.adlist[child]["""output"""] + self.adlist[self.adlist[child]["""fail_state"""]]["""output"""] ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Dict: _lowerCAmelCase ={} # returns a dict with keywords and list of its occurrences _lowerCAmelCase =0 for i in range(len(__lowerCamelCase ) ): while ( self.find_next_state(__lowerCamelCase , string[i] ) is None and current_state != 0 ): _lowerCAmelCase =self.adlist[current_state]["""fail_state"""] _lowerCAmelCase =self.find_next_state(__lowerCamelCase , string[i] ) if next_state is None: _lowerCAmelCase =0 else: _lowerCAmelCase =next_state for key in self.adlist[current_state]["output"]: if key not in result: _lowerCAmelCase =[] result[key].append(i - len(__lowerCamelCase ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
368
"""simple docstring""" import warnings from .generation import TFGenerationMixin class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' # warning at import time warnings.warn( '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will ''' '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
341
0
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
369
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =decoder_seq_length # For common tests _lowerCAmelCase =self.decoder_seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_attention_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =d_model _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_ffn_dim _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =eos_token_id _lowerCAmelCase =bos_token_id _lowerCAmelCase =pad_token_id _lowerCAmelCase =decoder_start_token_id _lowerCAmelCase =use_cache _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =None _lowerCAmelCase =decoder_seq_length _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =None if self.use_attention_mask: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _lowerCAmelCase =None if self.use_labels: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]: _lowerCAmelCase =True _lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval() _lowerCAmelCase =input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _lowerCAmelCase =outputs["""past_key_values"""] # create hypothetical next token and extent to next_input_ids _lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # 
append to next input_ids and _lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""] _lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""] # select random slice _lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else () lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {} lowerCamelCase = True lowerCamelCase = False def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> List[str]: pass def _lowerCAmelCase ( self ) -> List[Any]: pass def _lowerCAmelCase ( self ) -> Any: pass def _lowerCAmelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: return @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def _lowerCAmelCase ( self ) -> str: pass
341
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: __A = None __A = logging.get_logger(__name__) __A = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} __A = { 'vocab_file': { 'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model', 'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model', }, 'tokenizer_file': { 'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json', 'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json', }, } __A = { 'google/fnet-base': 512, 'google/fnet-large': 512, } __A = '▁' class lowerCamelCase__ ( a__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['''input_ids''', '''token_type_ids'''] lowerCamelCase = FNetTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) -> Any: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. _lowerCAmelCase =( AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token ) super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , ) _lowerCAmelCase =do_lower_case _lowerCAmelCase =remove_space _lowerCAmelCase =keep_accents _lowerCAmelCase =vocab_file _lowerCAmelCase =False if not self.vocab_file else True def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[Any]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Optional[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Any: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
370
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase = JukeboxTokenizer lowerCamelCase = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def _lowerCAmelCase ( self ) -> str: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 
45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def _lowerCAmelCase ( self ) -> Any: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 
35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
341
0
"""simple docstring""" __A = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def _lowerCamelCase() -> Optional[Any]: _lowerCAmelCase =input("""Enter message: """ ) _lowerCAmelCase =input("""Enter key [alphanumeric]: """ ) _lowerCAmelCase =input("""Encrypt/Decrypt [e/d]: """ ) if mode.lower().startswith("""e""" ): _lowerCAmelCase ='''encrypt''' _lowerCAmelCase =encrypt_message(__UpperCamelCase , __UpperCamelCase ) elif mode.lower().startswith("""d""" ): _lowerCAmelCase ='''decrypt''' _lowerCAmelCase =decrypt_message(__UpperCamelCase , __UpperCamelCase ) print(F'''\n{mode.title()}ed message:''' ) print(__UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: return translate_message(__UpperCamelCase , __UpperCamelCase , """encrypt""" ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> str: return translate_message(__UpperCamelCase , __UpperCamelCase , """decrypt""" ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple: _lowerCAmelCase =[] _lowerCAmelCase =0 _lowerCAmelCase =key.upper() for symbol in message: _lowerCAmelCase =LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(__UpperCamelCase ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(__UpperCamelCase ): _lowerCAmelCase =0 else: translated.append(__UpperCamelCase ) return "".join(__UpperCamelCase ) if __name__ == "__main__": main()
371
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = '▁' __A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} __A = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } __A = {'vinai/bartpho-syllable': 1024} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =vocab_file _lowerCAmelCase =monolingual_vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _lowerCAmelCase ={} _lowerCAmelCase =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =cnt cnt += 1 with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _lowerCAmelCase =line.strip().split()[0] _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) _lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Dict: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None _lowerCAmelCase =self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] _lowerCAmelCase =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 
False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCAmelCase ( self ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: return self.fairseq_ids_to_tokens[index] def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__UpperCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
341
0
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase = 100 ) -> str: _lowerCAmelCase =n * (n + 1) * (2 * n + 1) / 6 _lowerCAmelCase =(n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F"""{solution() = }""")
350
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =1 _lowerCAmelCase =3 _lowerCAmelCase =(32, 32) _lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase ) return image @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , ) return model @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def _lowerCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _lowerCAmelCase =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , ) return CLIPTextModel(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , 
guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0] _lowerCAmelCase =image[0, -3:, -3:, -1] _lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1] _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 _lowerCAmelCase =unet.half() _lowerCAmelCase =text_encoder.half() # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase 
=sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 1e-3 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat_fp16.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowerCAmelCase ( self ) -> Optional[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , 
output_type="""np""" , ) _lowerCAmelCase =torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
341
0
def xnor_gate(input_1: int, input_2: int) -> int:
    # XNOR outputs 1 when both inputs are equal, 0 otherwise.
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
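
# A small composition sketch (an addition for illustration, not part of the
# original file): XNOR is the negation of XOR, so an XOR gate follows directly.
def xor_gate(input_1: int, input_2: int) -> int:
    # Inverting XNOR's 0/1 output yields XOR.
    return 1 - xnor_gate(input_1, input_2)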
351
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''cvt''' def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) _lowerCAmelCase =num_channels _lowerCAmelCase =patch_sizes _lowerCAmelCase =patch_stride _lowerCAmelCase =patch_padding _lowerCAmelCase =embed_dim _lowerCAmelCase =num_heads _lowerCAmelCase =depth _lowerCAmelCase =mlp_ratio _lowerCAmelCase =attention_drop_rate _lowerCAmelCase =drop_rate _lowerCAmelCase =drop_path_rate _lowerCAmelCase =qkv_bias _lowerCAmelCase =cls_token _lowerCAmelCase =qkv_projection_method _lowerCAmelCase =kernel_qkv _lowerCAmelCase =padding_kv _lowerCAmelCase =stride_kv _lowerCAmelCase =padding_q _lowerCAmelCase =stride_q _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps
341
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json', } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''timesformer''' def __init__( self , __UpperCAmelCase=2_24 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=8 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase="divided_space_time" , __UpperCAmelCase=0 , **__UpperCAmelCase , ) -> Optional[int]: super().__init__(**__UpperCAmelCase ) _lowerCAmelCase =image_size _lowerCAmelCase =patch_size _lowerCAmelCase =num_channels _lowerCAmelCase =num_frames _lowerCAmelCase =hidden_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =intermediate_size _lowerCAmelCase =hidden_act _lowerCAmelCase =hidden_dropout_prob _lowerCAmelCase =attention_probs_dropout_prob _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps _lowerCAmelCase =qkv_bias _lowerCAmelCase =attention_type _lowerCAmelCase =drop_path_rate
352
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = ['''image_processor''', '''tokenizer'''] lowerCamelCase = '''CLIPImageProcessor''' lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''') def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase =None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __UpperCAmelCase , ) _lowerCAmelCase =kwargs.pop("""feature_extractor""" ) _lowerCAmelCase =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if images is not None: _lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: _lowerCAmelCase =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =self.tokenizer.model_input_names _lowerCAmelCase =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
341
0
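# Usage sketch for the video-transformer config class in the record above.
# Assumption (hedged): the de-obfuscated class is `transformers.TimesformerConfig`,
# and the defaults below mirror the __init__ arguments shown in that record.
from transformers import TimesformerConfig

config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
print(config.hidden_size)  # 768, the default hidden size
print(config.num_frames)   # 16, overridden from the default of 8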
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowerCamelCase__ ( _lowerCamelCase ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , **__UpperCAmelCase , ) -> Any: super().__init__(features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , **__UpperCAmelCase ) _lowerCAmelCase =Sql( cache_dir=__UpperCAmelCase , features=__UpperCAmelCase , sql=__UpperCAmelCase , con=__UpperCAmelCase , **__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =None _lowerCAmelCase =None _lowerCAmelCase =None _lowerCAmelCase =None self.builder.download_and_prepare( download_config=__UpperCAmelCase , download_mode=__UpperCAmelCase , verification_mode=__UpperCAmelCase , base_path=__UpperCAmelCase , ) # Build dataset for splits _lowerCAmelCase =self.builder.as_dataset( split="""train""" , verification_mode=__UpperCAmelCase , in_memory=self.keep_in_memory ) return dataset class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Dict: if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) _lowerCAmelCase =dataset _lowerCAmelCase =name _lowerCAmelCase =con _lowerCAmelCase =batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _lowerCAmelCase =num_proc _lowerCAmelCase =to_sql_kwargs def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =self.to_sql_kwargs.pop("""sql""" , __UpperCAmelCase ) _lowerCAmelCase =self.to_sql_kwargs.pop("""con""" , __UpperCAmelCase ) _lowerCAmelCase =self.to_sql_kwargs.pop("""index""" , __UpperCAmelCase ) _lowerCAmelCase =self._write(index=__UpperCAmelCase , **self.to_sql_kwargs ) return written def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Any: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =args _lowerCAmelCase ={**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs _lowerCAmelCase =query_table( table=self.dataset.data , key=slice(__UpperCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , ) _lowerCAmelCase =batch.to_pandas() _lowerCAmelCase =df.to_sql(self.name , self.con , index=__UpperCAmelCase , **__UpperCAmelCase ) return num_rows or len(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _lowerCAmelCase , _lowerCAmelCase =len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __UpperCAmelCase , __UpperCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not 
logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
353
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['PerceiverFeatureExtractor'] __A = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
0
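# Round-trip sketch for the SQL reader/writer classes in the record above.
# Assumptions (hedged): they back the public `Dataset.from_sql` / `Dataset.to_sql`
# entry points; `to_sql` accepts a sqlite3 connection (pandas handles it), and
# `from_sql` accepts a SQLAlchemy-style URI string (requires sqlalchemy).
import sqlite3

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
con = sqlite3.connect("demo.db")
ds.to_sql("samples", con)  # SqlDatasetWriter: Arrow batches -> pandas -> df.to_sql
restored = Dataset.from_sql("SELECT * FROM samples", "sqlite:///demo.db")  # SqlDatasetReader
print(restored.column_names)  # ['text', 'label']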
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem

_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
354
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Swinv2ForImageClassification', 'Swinv2ForMaskedImageModeling', 'Swinv2Model', 'Swinv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
0
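# Behaviour sketch for the URI helpers in the record above. This re-states the
# same splitting logic in a self-contained form so it runs without the
# `datasets` package:
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path

assert extract_path_from_uri("s3://bucket/train") == "bucket/train"
assert extract_path_from_uri("/local/path") == "/local/path"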
"""simple docstring""" from math import ceil, sqrt def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> int: _lowerCAmelCase =0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: _lowerCAmelCase =max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: _lowerCAmelCase =1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(F"""{solution() = }""")
355
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple: if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" ) _lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" ) _lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" ) _lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" ) _lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item _lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" ) _lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" ) _lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]: assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _lowerCAmelCase =old_checkpoint[path] _lowerCAmelCase =old_tensor.shape[0] // 3 _lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1) _lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3 _lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 ) _lowerCAmelCase =query.reshape(__UpperCamelCase ) _lowerCAmelCase =key.reshape(__UpperCamelCase ) _lowerCAmelCase =value.reshape(__UpperCamelCase ) for path in paths: _lowerCAmelCase =path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0] else: _lowerCAmelCase =old_checkpoint[path["""old"""]] def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase ={} _lowerCAmelCase =checkpoint["""time_embed.0.weight"""] _lowerCAmelCase =checkpoint["""time_embed.0.bias"""] _lowerCAmelCase =checkpoint["""time_embed.2.weight"""] _lowerCAmelCase =checkpoint["""time_embed.2.bias"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""] _lowerCAmelCase =checkpoint["""out.0.weight"""] _lowerCAmelCase =checkpoint["""out.0.bias"""] _lowerCAmelCase =checkpoint["""out.2.weight"""] _lowerCAmelCase =checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the middle blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the output blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } for i in range(1 , __UpperCamelCase ): _lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: _lowerCAmelCase =checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] _lowerCAmelCase =checkpoint[ 
F'''input_blocks.{i}.0.op.bias''' ] continue _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} _lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase ) if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''input_blocks.{i}.1''', """new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''input_blocks.{i}.1.qkv.bias''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , ) _lowerCAmelCase =middle_blocks[0] _lowerCAmelCase =middle_blocks[1] _lowerCAmelCase =middle_blocks[2] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase ) for i in range(__UpperCamelCase ): _lowerCAmelCase =i // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =i % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]] _lowerCAmelCase ={} for layer in output_block_layers: _lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__UpperCamelCase ) else: _lowerCAmelCase =[layer_name] if len(__UpperCamelCase ) > 1: _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(__UpperCamelCase ) == 2: _lowerCAmelCase =[] if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''output_blocks.{i}.1''', """new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''output_blocks.{i}.1.qkv.bias''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , ) else: _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] ) _lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] ) _lowerCAmelCase =checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') __A = parser.parse_args() __A = torch.load(args.checkpoint_path) with open(args.config_file) as f: __A = json.loads(f.read()) __A = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __A = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) __A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) __A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
341
0
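# Brute-force cross-check for the laminae counter `solution` earlier in this
# record (a sketch, only practical for small limits). A hollow square lamina
# with outer width o and hole width h (same parity, 1 <= h <= o - 2) uses
# o**2 - h**2 tiles.
def solution_bf(limit: int) -> int:
    count = 0
    for o in range(3, limit // 4 + 2):  # thinnest lamina uses 4*(o - 1) tiles
        for h in range(o - 2, 0, -2):  # thinnest hole first; tiles grow as h shrinks
            if o * o - h * h > limit:
                break
            count += 1
    return count

assert solution_bf(100) == 41  # agrees with the closed-form solution(100)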
"""simple docstring""" import math from collections.abc import Callable def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float: _lowerCAmelCase =xa _lowerCAmelCase =xa while True: if x_n == x_na or function(__UpperCAmelCase ) == function(__UpperCAmelCase ): raise ZeroDivisionError("""float division by zero, could not find root""" ) _lowerCAmelCase =x_na - ( function(__UpperCAmelCase ) / ((function(__UpperCAmelCase ) - function(__UpperCAmelCase )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na _lowerCAmelCase =x_na _lowerCAmelCase =x_na def _lowerCamelCase(__UpperCamelCase ) -> float: return math.pow(__UpperCAmelCase , 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
356
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =0 _lowerCAmelCase =len(__UpperCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , __UpperCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: if len(__UpperCamelCase ) <= 1: return arr, 0 _lowerCAmelCase =len(__UpperCamelCase ) // 2 _lowerCAmelCase =arr[0:mid] _lowerCAmelCase =arr[mid:] _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =inversion_p + inversions_q + cross_inversions return c, num_inversions def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: _lowerCAmelCase =[] _lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0 while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__UpperCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__UpperCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _lowerCamelCase() -> str: _lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , __UpperCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) # an empty list should also have zero inversions _lowerCAmelCase =[] _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) if __name__ == "__main__": main()
341
0
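# Randomized agreement check for the two inversion counters in the record
# above (a sketch; assumes `count_inversions_bf` and
# `count_inversions_recursive` from that record are in scope).
import random

for _ in range(100):
    arr = [random.randint(0, 9) for _ in range(30)]
    assert count_inversions_bf(arr) == count_inversions_recursive(arr)[1]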
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCamelCase__ ( A__ , A__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = CycleDiffusionPipeline lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { 'negative_prompt', 'height', 'width', 'negative_prompt_embeds', } lowerCamelCase = PipelineTesterMixin.required_optional_params - {'latents'} lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def _lowerCAmelCase ( self ) -> Dict: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) _lowerCAmelCase =DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , num_train_timesteps=10_00 , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , ) torch.manual_seed(0 ) _lowerCAmelCase =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) _lowerCAmelCase =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) _lowerCAmelCase =CLIPTextModel(lowerCamelCase__ ) _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase ={ """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Optional[int]: _lowerCAmelCase =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) _lowerCAmelCase =image / 2 + 0.5 if str(lowerCamelCase__ ).startswith("""mps""" ): _lowerCAmelCase =torch.manual_seed(lowerCamelCase__ ) else: _lowerCAmelCase =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) _lowerCAmelCase ={ """prompt""": """An astronaut riding an elephant""", """source_prompt""": """An astronaut riding a horse""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """eta""": 0.1, """strength""": 0.8, """guidance_scale""": 3, """source_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase 
=self.get_dummy_components() _lowerCAmelCase =CycleDiffusionPipeline(**lowerCamelCase__ ) _lowerCAmelCase =pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) _lowerCAmelCase =self.get_dummy_inputs(lowerCamelCase__ ) _lowerCAmelCase =pipe(**lowerCamelCase__ ) _lowerCAmelCase =output.images _lowerCAmelCase =images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _lowerCAmelCase =np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =self.get_dummy_components() for name, module in components.items(): if hasattr(lowerCamelCase__ , """half""" ): _lowerCAmelCase =module.half() _lowerCAmelCase =CycleDiffusionPipeline(**lowerCamelCase__ ) _lowerCAmelCase =pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) _lowerCAmelCase =self.get_dummy_inputs(lowerCamelCase__ ) _lowerCAmelCase =pipe(**lowerCamelCase__ ) _lowerCAmelCase =output.images _lowerCAmelCase =images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _lowerCAmelCase =np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def _lowerCAmelCase ( self ) -> Any: return super().test_save_load_local() @unittest.skip("""non-deterministic pipeline""" ) def _lowerCAmelCase ( self ) -> Any: return super().test_inference_batch_single_identical() @skip_mps def _lowerCAmelCase ( self ) -> Union[str, Any]: return super().test_dict_tuple_outputs_equivalent() @skip_mps def _lowerCAmelCase ( self ) -> Optional[int]: return super().test_save_load_optional_components() @skip_mps def _lowerCAmelCase ( self ) -> List[Any]: return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" ) _lowerCAmelCase =init_image.resize((5_12, 5_12) ) _lowerCAmelCase ="""CompVis/stable-diffusion-v1-4""" _lowerCAmelCase =DDIMScheduler.from_pretrained(lowerCamelCase__ , subfolder="""scheduler""" ) _lowerCAmelCase =CycleDiffusionPipeline.from_pretrained( lowerCamelCase__ , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa , revision="""fp16""" ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) pipe.enable_attention_slicing() _lowerCAmelCase ="""A black colored car""" _lowerCAmelCase ="""A blue colored car""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=lowerCamelCase__ , source_prompt=lowerCamelCase__ , image=lowerCamelCase__ , num_inference_steps=1_00 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCamelCase__ , output_type="""np""" , ) _lowerCAmelCase =output.images # the values aren't exactly equal, 
but the images look the same visually assert np.abs(image - expected_image ).max() < 5e-1 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" ) _lowerCAmelCase =init_image.resize((5_12, 5_12) ) _lowerCAmelCase ="""CompVis/stable-diffusion-v1-4""" _lowerCAmelCase =DDIMScheduler.from_pretrained(lowerCamelCase__ , subfolder="""scheduler""" ) _lowerCAmelCase =CycleDiffusionPipeline.from_pretrained(lowerCamelCase__ , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) pipe.enable_attention_slicing() _lowerCAmelCase ="""A black colored car""" _lowerCAmelCase ="""A blue colored car""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=lowerCamelCase__ , source_prompt=lowerCamelCase__ , image=lowerCamelCase__ , num_inference_steps=1_00 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCamelCase__ , output_type="""np""" , ) _lowerCAmelCase =output.images assert np.abs(image - expected_image ).max() < 2e-2
357
"""simple docstring""" import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = True lowerCamelCase = None lowerCamelCase = 1 lowerCamelCase = None lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None def _lowerCAmelCase ( self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
341
0
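# Sketch of the deep-copy behaviour the dataclass method in the record above
# implements. Assumption (hedged): the record corresponds to
# `datasets.DownloadConfig`, whose `copy()` returns an independent deep copy.
from datasets import DownloadConfig

cfg = DownloadConfig(num_proc=4)
cfg2 = cfg.copy()
cfg2.num_proc = 8
assert cfg.num_proc == 4  # the copy does not alias the original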
import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder __A = '''base_with_context''' def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=A_ ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCAmelCase =weights[F'''layers_{lyr_num}'''] _lowerCAmelCase =nn.Parameter( torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) ) _lowerCAmelCase =ly_weight['''attention'''] _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) ) return model def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> List[Any]: _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=A_ ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCAmelCase =weights[F'''layers_{lyr_num}'''] _lowerCAmelCase =ly_weight['''attention'''] _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) ) return model def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Tuple: _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) ) 
_lowerCAmelCase =nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=A_ ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) ) for lyr_num, lyr in enumerate(model.decoders ): _lowerCAmelCase =weights[F'''layers_{lyr_num}'''] _lowerCAmelCase =nn.Parameter( torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) ) _lowerCAmelCase =ly_weight['''self_attention'''] _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _lowerCAmelCase =ly_weight['''MultiHeadDotProductAttention_0'''] _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) ) return model def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: _lowerCAmelCase =checkpoints.load_tax_checkpoint(args.checkpoint_path ) _lowerCAmelCase =jnp.tree_util.tree_map(onp.array , A_ ) _lowerCAmelCase =[ '''from __gin__ import dynamic_registration''', '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''', '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''', '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''', ] _lowerCAmelCase =os.path.join(args.checkpoint_path , """..""" , """config.gin""" ) _lowerCAmelCase =inference.parse_training_gin_file(A_ , A_ ) _lowerCAmelCase =inference.InferenceModel(args.checkpoint_path , A_ ) _lowerCAmelCase =DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" ) _lowerCAmelCase =SpectrogramNotesEncoder( max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , 
d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , ) _lowerCAmelCase =SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , ) _lowerCAmelCase =TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) _lowerCAmelCase =load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , A_ ) _lowerCAmelCase =load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , A_ ) _lowerCAmelCase =load_decoder(ta_checkpoint["""target"""]["""decoder"""] , A_ ) _lowerCAmelCase =OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" ) _lowerCAmelCase =SpectrogramDiffusionPipeline( notes_encoder=A_ , continuous_encoder=A_ , decoder=A_ , scheduler=A_ , melgan=A_ , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument( '--checkpoint_path', default=F"""{MODEL}/checkpoint_500000""", type=str, required=False, help='Path to the original jax model checkpoint.', ) __A = parser.parse_args() main(args)
358
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int: return int((input_a, input_a).count(1 ) != 0 ) def _lowerCamelCase() -> None: assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
341
0
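# The conversion functions in the record above repeatedly load a Flax `kernel`
# of shape (in_features, out_features) into a torch Linear weight of shape
# (out_features, in_features) -- hence all the `.T` transposes. Minimal
# illustration of that weight-layout difference (a sketch, independent of the
# checkpoint format above):
import numpy as np
import torch
import torch.nn as nn

kernel = np.random.randn(16, 32).astype(np.float32)  # Flax convention: (in, out)
linear = nn.Linear(16, 32, bias=False)
linear.weight = nn.Parameter(torch.from_numpy(kernel.T))  # torch convention: (out, in)

x = np.random.randn(4, 16).astype(np.float32)
np.testing.assert_allclose(
    x @ kernel,  # Flax-style forward
    linear(torch.from_numpy(x)).detach().numpy(),  # torch forward
    rtol=1e-4,
)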
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_clipseg': [ 'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CLIPSegConfig', 'CLIPSegTextConfig', 'CLIPSegVisionConfig', ], 'processing_clipseg': ['CLIPSegProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST', 'CLIPSegModel', 'CLIPSegPreTrainedModel', 'CLIPSegTextModel', 'CLIPSegVisionModel', 'CLIPSegForImageSegmentation', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
359
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py __A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' __A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' __A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple: _lowerCAmelCase =compute_bleu( reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
341
0
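# Stripped-down illustration of the lazy-import pattern that `_LazyModule`
# applies in the __init__ files above: attribute access triggers the real
# import on first use, then caches it. This is a sketch of the idea, not the
# actual `_LazyModule` implementation.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [exported names]} into name -> submodule
        self._name_to_submodule = {
            exported: submodule
            for submodule, exported_names in import_structure.items()
            for exported in exported_names
        }

    def __getattr__(self, attr: str):
        submodule = importlib.import_module(
            "." + self._name_to_submodule[attr], self.__name__
        )
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache, so __getattr__ is not hit again
        return value

# A package __init__ would then replace itself, e.g. (hypothetical names):
# sys.modules[__name__] = MiniLazyModule(__name__, {"modeling": ["MyModel"]})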
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =int(__UpperCamelCase ) if n_element < 1: _lowerCAmelCase =ValueError("""a should be a positive number""" ) raise my_error _lowerCAmelCase =[1] _lowerCAmelCase =(0, 0, 0) _lowerCAmelCase =1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": __A = input('Enter the last number (nth term) of the Hamming Number Series: ') print('Formula of Hamming Number Series => 2^i * 3^j * 5^k') __A = hamming(int(n)) print('-----------------------------------------------------') print(F"""The list with nth numbers is: {hamming_numbers}""") print('-----------------------------------------------------')
360
"""simple docstring""" import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=512, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def _lowerCamelCase(__UpperCamelCase ) -> List[str]: if string == "True": return True elif string == "False": return False else: raise ValueError(F'''could not parse string as bool {string}''' ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) __A = parser.parse_args() __A = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
341
0
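# Brute-force cross-check for the `hamming` generator earlier in this record:
# enumerate 2**i * 3**j * 5**k products directly and compare the first terms
# (a sketch; the exponent bounds are ample for the first dozen values).
def hamming_bf(n: int) -> list:
    products = sorted(
        2**i * 3**j * 5**k
        for i in range(20)
        for j in range(13)
        for k in range(9)
    )
    return products[:n]

assert hamming_bf(12) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16]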
"""simple docstring""" from __future__ import annotations import math from collections.abc import Callable def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 100 , ) -> Optional[Any]: _lowerCAmelCase =x_start _lowerCAmelCase =fnc(_a ) _lowerCAmelCase =0.0 for _ in range(_a ): # Approximates curve as a sequence of linear lines and sums their length _lowerCAmelCase =(x_end - x_start) / steps + xa _lowerCAmelCase =fnc(_a ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step _lowerCAmelCase =xa _lowerCAmelCase =fxa return length if __name__ == "__main__": def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: return math.sin(10 * x ) print('f(x) = sin(10 * x)') print('The length of the curve from x = -10 to x = 10 is:') __A = 10 while i <= 10_0000: print(F"""With {i} steps: {line_length(f, -10, 10, i)}""") i *= 10
361
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __A = { 'configuration_audio_spectrogram_transformer': [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ASTConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ASTForAudioClassification', 'ASTModel', 'ASTPreTrainedModel', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['ASTFeatureExtractor'] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
0
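# Sanity checks for `line_length` earlier in this record (a sketch; assumes
# that function is in scope): a straight line from (0,0) to (1,1) has length
# sqrt(2), and a constant function has length equal to its x-span.
import math

assert abs(line_length(lambda x: x, 0, 1, 100) - math.sqrt(2)) < 1e-6
assert abs(line_length(lambda x: 5, 0, 3, 10) - 3.0) < 1e-6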
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __A = 16 __A = 32 def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = "bert-base-cased" ) -> List[str]: _lowerCAmelCase =AutoTokenizer.from_pretrained(__lowerCAmelCase ) _lowerCAmelCase =load_dataset("""glue""" , """mrpc""" ) def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) _lowerCAmelCase =tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset _lowerCAmelCase =datasets.map( __lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=__lowerCAmelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCAmelCase =tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__lowerCAmelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(__lowerCAmelCase , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
_lowerCAmelCase =DataLoader( tokenized_datasets["""train"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase ) _lowerCAmelCase =DataLoader( tokenized_datasets["""validation"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase ) return train_dataloader, eval_dataloader def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Tuple: _lowerCAmelCase =Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCAmelCase =config['''lr'''] _lowerCAmelCase =int(config["""num_epochs"""] ) _lowerCAmelCase =int(config["""seed"""] ) _lowerCAmelCase =int(config["""batch_size"""] ) _lowerCAmelCase =args.model_name_or_path set_seed(__lowerCAmelCase ) _lowerCAmelCase =get_dataloaders(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCAmelCase =AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase , return_dict=__lowerCAmelCase ) # Instantiate optimizer _lowerCAmelCase =( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) _lowerCAmelCase =optimizer_cls(params=model.parameters() , lr=__lowerCAmelCase ) if accelerator.state.deepspeed_plugin is not None: _lowerCAmelCase =accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: _lowerCAmelCase =1 _lowerCAmelCase =(len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): _lowerCAmelCase =get_linear_schedule_with_warmup( optimizer=__lowerCAmelCase , num_warmup_steps=0 , num_training_steps=__lowerCAmelCase , ) else: _lowerCAmelCase =DummyScheduler(__lowerCAmelCase , total_num_steps=__lowerCAmelCase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowerCAmelCase =accelerator.prepare( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # We need to keep track of how many total steps we have iterated over _lowerCAmelCase =0 # We also need to keep track of the stating epoch so files are named properly _lowerCAmelCase =0 # Now we train the model _lowerCAmelCase =evaluate.load("""glue""" , """mrpc""" ) _lowerCAmelCase =0 _lowerCAmelCase ={} for epoch in range(__lowerCAmelCase , __lowerCAmelCase ): model.train() for step, batch in enumerate(__lowerCAmelCase ): _lowerCAmelCase =model(**__lowerCAmelCase ) _lowerCAmelCase =outputs.loss _lowerCAmelCase =loss / gradient_accumulation_steps accelerator.backward(__lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() _lowerCAmelCase =0 for step, batch in enumerate(__lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _lowerCAmelCase =model(**__lowerCAmelCase ) _lowerCAmelCase =outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times _lowerCAmelCase =accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__lowerCAmelCase ) - 1: _lowerCAmelCase =predictions[: len(eval_dataloader.dataset ) - samples_seen] _lowerCAmelCase =references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__lowerCAmelCase , references=__lowerCAmelCase , ) _lowerCAmelCase =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , __lowerCAmelCase ) _lowerCAmelCase =eval_metric['''accuracy'''] if best_performance < eval_metric["accuracy"]: _lowerCAmelCase =eval_metric['''accuracy'''] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def _lowerCamelCase() -> Dict: _lowerCAmelCase =argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=__lowerCAmelCase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=__lowerCAmelCase , ) parser.add_argument( """--output_dir""" , type=__lowerCAmelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--performance_lower_bound""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , ) parser.add_argument( """--num_epochs""" , type=__lowerCAmelCase , default=3 , help="""Number of train epochs.""" , ) _lowerCAmelCase =parser.parse_args() _lowerCAmelCase ={'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": main()
362
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { 'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'], 'tokenization_m2m_100': ['M2M100Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST', 'M2M100ForConditionalGeneration', 'M2M100Model', 'M2M100PreTrainedModel', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { '''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''', } class lowerCamelCase__ ( a__ ): '''simple docstring''' lowerCamelCase = """open-llama""" def __init__( self , __UpperCAmelCase=10_00_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Tuple: _lowerCAmelCase =vocab_size _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =hidden_size _lowerCAmelCase =intermediate_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =hidden_act _lowerCAmelCase =initializer_range _lowerCAmelCase =rms_norm_eps _lowerCAmelCase =use_cache _lowerCAmelCase =kwargs.pop( """use_memorry_efficient_attention""" , lowerCAmelCase__ ) _lowerCAmelCase =hidden_dropout_prob _lowerCAmelCase =attention_dropout_prob _lowerCAmelCase =use_stable_embedding _lowerCAmelCase =shared_input_output_embedding _lowerCAmelCase =rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , tie_word_embeddings=lowerCAmelCase__ , **lowerCAmelCase__ , ) def _lowerCAmelCase ( self ) -> Union[str, Any]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , lowerCAmelCase__ ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'''got {self.rope_scaling}''' ) _lowerCAmelCase =self.rope_scaling.get("""type""" , lowerCAmelCase__ ) _lowerCAmelCase =self.rope_scaling.get("""factor""" , lowerCAmelCase__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
363
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __A = datasets.logging.get_logger(__name__) __A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' __A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' __A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict: _lowerCAmelCase ={doc: key_lines} _lowerCAmelCase ={doc: sys_lines} _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) if remove_nested: _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' 
"""files, respectively""" ) return doc_coref_infos def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: _lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 for name, metric in metrics: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _lowerCAmelCase =(conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''' ) output_scores.update({"""conll_score""": conll} ) return output_scores def _lowerCamelCase(__UpperCamelCase ) -> Tuple: _lowerCAmelCase =False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: _lowerCAmelCase =line.split()[5] if not parse_col == "-": _lowerCAmelCase =True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]: _lowerCAmelCase =[ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _lowerCAmelCase =evaluate( key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , ) return score
341
0
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> str: if number > 0: raise ValueError("""input must be a negative integer""" ) _lowerCAmelCase =len(bin(__UpperCamelCase )[3:] ) _lowerCAmelCase =bin(abs(__UpperCamelCase ) - (1 << binary_number_length) )[3:] _lowerCAmelCase =( ( """1""" + """0""" * (binary_number_length - len(__UpperCamelCase )) + twos_complement_number ) if number < 0 else """0""" ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
364
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = XGLMConfig lowerCamelCase = {} lowerCamelCase = '''gelu''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_input_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =ffn_dim _lowerCAmelCase =activation_function _lowerCAmelCase =activation_dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =initializer_range _lowerCAmelCase =None _lowerCAmelCase =0 _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Dict: return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _lowerCAmelCase =None if self.use_input_mask: _lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase =self.get_config() _lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self ) -> str: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) =config_and_inputs _lowerCAmelCase ={ """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': 
TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =TFXGLMModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 ) def _lowerCAmelCase ( self ) -> int: self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def _lowerCAmelCase ( self ) -> Union[str, Any]: super().test_resize_token_embeddings() @require_tf class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) _lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) _lowerCAmelCase =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] ) _lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase ="""left""" # use different length sentences to test batching _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase ) _lowerCAmelCase =inputs["""input_ids"""] _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
341
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class lowerCamelCase__ ( __lowerCAmelCase ): '''simple docstring''' lowerCamelCase = '''microsoft/speecht5_tts''' lowerCamelCase = ( '''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the ''' '''text to read (in English) and returns a waveform object containing the sound.''' ) lowerCamelCase = '''text_reader''' lowerCamelCase = SpeechTaProcessor lowerCamelCase = SpeechTaForTextToSpeech lowerCamelCase = SpeechTaHifiGan lowerCamelCase = ['''text'''] lowerCamelCase = ['''audio'''] def _lowerCAmelCase ( self ) -> Optional[int]: if self.post_processor is None: _lowerCAmelCase ="""microsoft/speecht5_hifigan""" super().setup() def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]: _lowerCAmelCase =self.pre_processor(text=lowerCAmelCase_ , return_tensors="""pt""" , truncation=lowerCAmelCase_ ) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" ) _lowerCAmelCase =load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" ) _lowerCAmelCase =torch.tensor(embeddings_dataset[73_05]["""xvector"""] ).unsqueeze(0 ) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: with torch.no_grad(): return self.model.generate_speech(**lowerCAmelCase_ ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str: with torch.no_grad(): return self.post_processor(lowerCAmelCase_ ).cpu().detach()
365
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A = logging.get_logger(__name__) __A = {'vocab_file': 'spiece.model'} __A = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } __A = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) __A = 0 __A = 1 __A = 2 __A = 3 __A = 4 class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = '''left''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =3 _lowerCAmelCase =do_lower_case _lowerCAmelCase =remove_space _lowerCAmelCase =keep_accents _lowerCAmelCase =vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> str: return len(self.sp_model ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[int]: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None return state def __setstate__( self , __UpperCAmelCase ) -> Tuple: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: if self.remove_space: _lowerCAmelCase =""" """.join(inputs.strip().split() ) else: _lowerCAmelCase =inputs _lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase ) _lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] ) if self.do_lower_case: _lowerCAmelCase =outputs.lower() return outputs def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: 
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase ) _lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) _lowerCAmelCase =[] for piece in pieces: if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCAmelCase =cur_pieces[1:] else: _lowerCAmelCase =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__UpperCAmelCase ) else: new_pieces.append(__UpperCAmelCase ) return new_pieces def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: return self.sp_model.PieceToId(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.IdToPiece(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str: _lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase ) _lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 _lowerCAmelCase =[] _lowerCAmelCase =[] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) _lowerCAmelCase =[] sub_texts.append(__UpperCAmelCase ) else: current_sub_text.append(__UpperCAmelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens _lowerCAmelCase ="""""".join(__UpperCAmelCase ) _lowerCAmelCase =( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase ) return clean_text else: return text def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] return ([0] * len(__UpperCAmelCase )) + [1, 1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowerCAmelCase ( self , __UpperCAmelCase , 
__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
341
0
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __A = logging.get_logger(__name__) __A = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class lowerCamelCase__ ( _lowerCAmelCase ): '''simple docstring''' lowerCamelCase = '''marian''' lowerCamelCase = ['''past_key_values'''] lowerCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , __UpperCAmelCase=5_81_01 , __UpperCAmelCase=None , __UpperCAmelCase=10_24 , __UpperCAmelCase=12 , __UpperCAmelCase=40_96 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=40_96 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="gelu" , __UpperCAmelCase=10_24 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=5_81_00 , __UpperCAmelCase=False , __UpperCAmelCase=5_81_00 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=True , **__UpperCAmelCase , ) -> Tuple: _lowerCAmelCase =vocab_size _lowerCAmelCase =decoder_vocab_size or vocab_size _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =d_model _lowerCAmelCase =encoder_ffn_dim _lowerCAmelCase =encoder_layers _lowerCAmelCase =encoder_attention_heads _lowerCAmelCase =decoder_ffn_dim _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =activation_dropout _lowerCAmelCase =activation_function _lowerCAmelCase =init_std _lowerCAmelCase =encoder_layerdrop _lowerCAmelCase =decoder_layerdrop _lowerCAmelCase =use_cache _lowerCAmelCase =encoder_layers _lowerCAmelCase =scale_embedding # scale factor will be sqrt(d_model) if True _lowerCAmelCase =share_encoder_decoder_embeddings super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , forced_eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) class lowerCamelCase__ ( _lowerCAmelCase ): '''simple docstring''' @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def _lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase =OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: _lowerCAmelCase ={0: """batch"""} _lowerCAmelCase ={0: """batch""", 1: """past_decoder_sequence + sequence"""} else: _lowerCAmelCase ={0: """batch""", 1: """decoder_sequence"""} _lowerCAmelCase ={0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. 
_lowerCAmelCase =OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: _lowerCAmelCase =self.num_layers for i in range(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase ={0: """batch""", 2: """past_sequence + sequence"""} _lowerCAmelCase ={0: """batch""", 2: """past_sequence + sequence"""} else: _lowerCAmelCase =OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def _lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase =super().outputs else: _lowerCAmelCase =super(SCREAMING_SNAKE_CASE_ , self ).outputs if self.use_past: _lowerCAmelCase =self.num_layers for i in range(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase ={0: """batch""", 2: """past_sequence + sequence"""} _lowerCAmelCase ={0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _lowerCAmelCase =self._generate_dummy_inputs_for_encoder_and_decoder( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Generate decoder inputs _lowerCAmelCase =seq_length if not self.use_past else 1 _lowerCAmelCase =self._generate_dummy_inputs_for_encoder_and_decoder( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase ={f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} _lowerCAmelCase =dict(**SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch _lowerCAmelCase =common_inputs["""input_ids"""].shape _lowerCAmelCase =common_inputs["""decoder_input_ids"""].shape[1] _lowerCAmelCase =self.num_attention_heads _lowerCAmelCase =( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _lowerCAmelCase =decoder_seq_length + 3 _lowerCAmelCase =( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _lowerCAmelCase =torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] , dim=1 ) _lowerCAmelCase =[] # If the number of encoder and decoder layers are present in the model configuration, both are considered _lowerCAmelCase =self.num_layers _lowerCAmelCase =min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) - min_num_layers _lowerCAmelCase ="""encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(SCREAMING_SNAKE_CASE_ ): common_inputs["past_key_values"].append( ( torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ ), ) ) # TODO: test this. 
_lowerCAmelCase =encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) ) return common_inputs def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _lowerCAmelCase =self._generate_dummy_inputs_for_encoder_and_decoder( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch _lowerCAmelCase =common_inputs["""input_ids"""].shape # Not using the same length for past_key_values _lowerCAmelCase =seqlen + 2 _lowerCAmelCase =self.num_layers _lowerCAmelCase =self.num_attention_heads _lowerCAmelCase =( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _lowerCAmelCase =common_inputs["""attention_mask"""].dtype _lowerCAmelCase =torch.cat( [common_inputs["""attention_mask"""], torch.ones(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )] , dim=1 ) _lowerCAmelCase =[ (torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) for _ in range(SCREAMING_SNAKE_CASE_ ) ] return common_inputs def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _lowerCAmelCase =compute_effective_axis_dimension( SCREAMING_SNAKE_CASE_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _lowerCAmelCase =tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =compute_effective_axis_dimension( SCREAMING_SNAKE_CASE_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE_ ) # Generate dummy inputs according to compute batch and sequence _lowerCAmelCase =[""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size _lowerCAmelCase =dict(tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ ) ) return common_inputs def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase =self._generate_dummy_inputs_for_default_and_seqaseq_lm( SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ ) else: _lowerCAmelCase =self._generate_dummy_inputs_for_causal_lm( SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ ) return common_inputs def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: if self.task in ["default", "seq2seq-lm"]: _lowerCAmelCase =super()._flatten_past_key_values_(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: _lowerCAmelCase =super(SCREAMING_SNAKE_CASE_ , self )._flatten_past_key_values_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @property def _lowerCAmelCase ( self ) -> float: return 1e-4
366
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase ) -> bool: _lowerCAmelCase =str(__UpperCamelCase ) return n == n[::-1] def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> str: _lowerCAmelCase =0 for i in range(1 , __UpperCamelCase ): if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
341
0
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm __A = logging.get_logger(__name__) @dataclass class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCamelCase = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self , **__UpperCAmelCase ) -> Union[str, Any]: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: _lowerCAmelCase =deprecated_arg[3:] setattr(self , UpperCamelCase__ , not kwargs.pop(UpperCamelCase__ ) ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) _lowerCAmelCase =kwargs.pop("""torchscript""" , self.torchscript ) _lowerCAmelCase =kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics ) _lowerCAmelCase =kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level ) super().__init__(**UpperCamelCase__ ) lowerCamelCase = field(default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Trace the models using torchscript'''} ) lowerCamelCase = field(default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} ) lowerCamelCase = field( default='''O1''' , metadata={ '''help''': ( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ''' '''See details at https://nvidia.github.io/apex/amp.html''' ) } , ) @cached_property def _lowerCAmelCase ( self ) -> Tuple["torch.device", int]: requires_backends(self , ["""torch"""] ) logger.info("""PyTorch: setting up devices""" ) if not self.cuda: _lowerCAmelCase =torch.device("""cpu""" ) _lowerCAmelCase =0 elif is_torch_tpu_available(): _lowerCAmelCase =xm.xla_device() _lowerCAmelCase =0 else: _lowerCAmelCase =torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) _lowerCAmelCase =torch.cuda.device_count() return device, n_gpu @property def _lowerCAmelCase ( self ) -> Optional[Any]: return is_torch_tpu_available() and self.tpu @property def _lowerCAmelCase ( self ) -> int: requires_backends(self , ["""torch"""] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def _lowerCAmelCase ( self ) -> "torch.device": requires_backends(self , ["""torch"""] ) return self._setup_devices[0] @property def _lowerCAmelCase ( self ) -> Dict: requires_backends(self , ["""torch"""] ) return self._setup_devices[1] @property def _lowerCAmelCase ( self ) -> List[Any]: return self.n_gpu > 0
367
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''llama''' lowerCamelCase = ['''past_key_values'''] def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: _lowerCAmelCase =vocab_size _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =hidden_size _lowerCAmelCase =intermediate_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads # for backward compatibility if num_key_value_heads is None: _lowerCAmelCase =num_attention_heads _lowerCAmelCase =num_key_value_heads _lowerCAmelCase =hidden_act _lowerCAmelCase =initializer_range _lowerCAmelCase =rms_norm_eps _lowerCAmelCase =pretraining_tp _lowerCAmelCase =use_cache _lowerCAmelCase =rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> str: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'''got {self.rope_scaling}''' ) _lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase ) _lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
"""simple docstring""" from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) __A = 2_9979_2458 # Symbols __A = symbols('ct x y z') def _lowerCamelCase(__UpperCamelCase ) -> float: if velocity > c: raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError("""Speed must be greater than or equal to 1!""" ) return velocity / c def _lowerCamelCase(__UpperCamelCase ) -> float: return 1 / sqrt(1 - beta(lowerCamelCase_ ) ** 2 ) def _lowerCamelCase(__UpperCamelCase ) -> np.ndarray: return np.array( [ [gamma(lowerCamelCase_ ), -gamma(lowerCamelCase_ ) * beta(lowerCamelCase_ ), 0, 0], [-gamma(lowerCamelCase_ ) * beta(lowerCamelCase_ ), gamma(lowerCamelCase_ ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase = None ) -> np.ndarray: # Ensure event is not empty if event is None: _lowerCAmelCase =np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(lowerCamelCase_ ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: __A = transform(2997_9245) print('Example of four vector: ') print(F"""ct' = {four_vector[0]}""") print(F"""x' = {four_vector[1]}""") print(F"""y' = {four_vector[2]}""") print(F"""z' = {four_vector[3]}""") # Substitute symbols with numerical values __A = {ct: c, x: 1, y: 1, z: 1} __A = [four_vector[i].subs(sub_dict) for i in range(4)] print(F"""\n{numerical_vector}""")
"""simple docstring""" import warnings from .generation import TFGenerationMixin class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' # warning at import time warnings.warn( '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will ''' '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler problem 20)."""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
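# Sanity-check sketch for solution() above: 10! = 3628800 and 3+6+2+8+8+0+0 = 27,
# while 648 is the published answer to Project Euler #20 (digit sum of 100!).
assert solution(10) == 27
assert solution(100) == 648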
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =decoder_seq_length # For common tests _lowerCAmelCase =self.decoder_seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_attention_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =d_model _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_ffn_dim _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =eos_token_id _lowerCAmelCase =bos_token_id _lowerCAmelCase =pad_token_id _lowerCAmelCase =decoder_start_token_id _lowerCAmelCase =use_cache _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =None _lowerCAmelCase =decoder_seq_length _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =None if self.use_attention_mask: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _lowerCAmelCase =None if self.use_labels: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]: _lowerCAmelCase =True _lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval() _lowerCAmelCase =input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _lowerCAmelCase =outputs["""past_key_values"""] # create hypothetical next token and extent to next_input_ids _lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # 
append to next input_ids and _lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""] _lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""] # select random slice _lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else () lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {} lowerCamelCase = True lowerCamelCase = False def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> List[str]: pass def _lowerCAmelCase ( self ) -> List[Any]: pass def _lowerCAmelCase ( self ) -> Any: pass def _lowerCAmelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: return @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def _lowerCAmelCase ( self ) -> str: pass
"""simple docstring""" print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase = JukeboxTokenizer lowerCamelCase = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def _lowerCAmelCase ( self ) -> str: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 
45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def _lowerCAmelCase ( self ) -> Any: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 
35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'} class lowerCamelCase__ ( _a ): '''simple docstring''' lowerCamelCase = """ctrl""" lowerCamelCase = ["""past_key_values"""] lowerCamelCase = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , __UpperCAmelCase=24_65_34 , __UpperCAmelCase=2_56 , __UpperCAmelCase=12_80 , __UpperCAmelCase=81_92 , __UpperCAmelCase=48 , __UpperCAmelCase=16 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=True , **__UpperCAmelCase , ) -> int: _lowerCAmelCase =vocab_size _lowerCAmelCase =n_positions _lowerCAmelCase =n_embd _lowerCAmelCase =n_layer _lowerCAmelCase =n_head _lowerCAmelCase =dff _lowerCAmelCase =resid_pdrop _lowerCAmelCase =embd_pdrop _lowerCAmelCase =layer_norm_epsilon _lowerCAmelCase =initializer_range _lowerCAmelCase =use_cache super().__init__(**__lowerCAmelCase )
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = '▁' __A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} __A = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } __A = {'vinai/bartpho-syllable': 1024} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =vocab_file _lowerCAmelCase =monolingual_vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _lowerCAmelCase ={} _lowerCAmelCase =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =cnt cnt += 1 with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _lowerCAmelCase =line.strip().split()[0] _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) _lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Dict: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None _lowerCAmelCase =self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] _lowerCAmelCase =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 
False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCAmelCase ( self ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: return self.fairseq_ids_to_tokens[index] def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__UpperCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
"""simple docstring""" __A = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]: # Make sure the supplied data is a bytes-like object if not isinstance(__a , __a ): _lowerCAmelCase =F'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(__a ) _lowerCAmelCase =''.join(bin(__a )[2:].zfill(8 ) for byte in data ) _lowerCAmelCase =len(__a ) % 6 != 0 if padding_needed: # The padding that will be added later _lowerCAmelCase =b'=' * ((6 - len(__a ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(__a ) % 6) else: _lowerCAmelCase =b'' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(__a ) , 6 ) ).encode() + padding ) def _lowerCamelCase(__UpperCamelCase ) -> Dict: # Make sure encoded_data is either a string or a bytes-like object if not isinstance(__a , __a ) and not isinstance(__a , __a ): _lowerCAmelCase =( 'argument should be a bytes-like object or ASCII string, ' F'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(__a ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(__a , __a ): try: _lowerCAmelCase =encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) _lowerCAmelCase =encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(__a ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one _lowerCAmelCase =encoded_data[:-padding] _lowerCAmelCase =''.join( bin(B64_CHARSET.index(__a ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: _lowerCAmelCase =''.join( bin(B64_CHARSET.index(__a ) )[2:].zfill(6 ) for char in encoded_data ) _lowerCAmelCase =[ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(__a ) , 8 ) ] return bytes(__a ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =1 _lowerCAmelCase =3 _lowerCAmelCase =(32, 32) _lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase ) return image @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , ) return model @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def _lowerCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _lowerCAmelCase =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , ) return CLIPTextModel(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , 
guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0] _lowerCAmelCase =image[0, -3:, -3:, -1] _lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1] _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 _lowerCAmelCase =unet.half() _lowerCAmelCase =text_encoder.half() # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase 
=sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 1e-3 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat_fp16.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowerCAmelCase ( self ) -> Optional[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , 
output_type="""np""" , ) _lowerCAmelCase =torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __A = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=16 , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=14 , __UpperCAmelCase=10 , __UpperCAmelCase=19 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=True , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=[1, 2, 3, 4, 5] , __UpperCAmelCase=25 , __UpperCAmelCase=5 , ) -> Optional[Any]: _lowerCAmelCase =d_model _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =prediction_length _lowerCAmelCase =context_length _lowerCAmelCase =cardinality _lowerCAmelCase =num_time_features _lowerCAmelCase =lags_sequence _lowerCAmelCase =embedding_dimension _lowerCAmelCase =is_training _lowerCAmelCase =hidden_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =intermediate_size _lowerCAmelCase =hidden_act _lowerCAmelCase =hidden_dropout_prob _lowerCAmelCase =attention_probs_dropout_prob _lowerCAmelCase =context_length _lowerCAmelCase =prediction_length + label_length _lowerCAmelCase =label_length _lowerCAmelCase =moving_average _lowerCAmelCase =autocorrelation_factor def _lowerCAmelCase ( self ) -> Any: return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: _lowerCAmelCase =config.context_length + max(config.lags_sequence ) _lowerCAmelCase =ids_tensor([self.batch_size, 1] , config.cardinality[0] ) _lowerCAmelCase =floats_tensor([self.batch_size, _past_length, config.num_time_features] ) _lowerCAmelCase =floats_tensor([self.batch_size, _past_length] ) _lowerCAmelCase =floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs _lowerCAmelCase =floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) _lowerCAmelCase =floats_tensor([self.batch_size, config.prediction_length] ) _lowerCAmelCase ={ """past_values""": past_values, """static_categorical_features""": static_categorical_features, """past_time_features""": past_time_features, """past_observed_mask""": 
past_observed_mask, """future_time_features""": future_time_features, """future_values""": future_values, } return inputs_dict def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =self.get_config() _lowerCAmelCase =self.prepare_autoformer_inputs_dict(__UpperCAmelCase ) return config, inputs_dict def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.prepare_config_and_inputs() return config, inputs_dict def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: _lowerCAmelCase =AutoformerModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval() _lowerCAmelCase =model(**__UpperCAmelCase ) _lowerCAmelCase =outputs.encoder_last_hidden_state _lowerCAmelCase =outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase =model.get_encoder() encoder.save_pretrained(__UpperCAmelCase ) _lowerCAmelCase =AutoformerEncoder.from_pretrained(__UpperCAmelCase ).to(__UpperCAmelCase ) _lowerCAmelCase =model.create_network_inputs(**__UpperCAmelCase ) _lowerCAmelCase =model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) _lowerCAmelCase =torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) _lowerCAmelCase =encoder(inputs_embeds=__UpperCAmelCase )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) _lowerCAmelCase =( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) _lowerCAmelCase =torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) _lowerCAmelCase =torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) _lowerCAmelCase =torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase =model.get_decoder() decoder.save_pretrained(__UpperCAmelCase ) _lowerCAmelCase =AutoformerDecoder.from_pretrained(__UpperCAmelCase ).to(__UpperCAmelCase ) _lowerCAmelCase =decoder( trend=__UpperCAmelCase , inputs_embeds=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class lowerCamelCase__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () lowerCamelCase = (AutoformerForPrediction,) if is_torch_available() else () lowerCamelCase = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {} lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =AutoformerModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _lowerCAmelCase =model_class(__UpperCAmelCase ) with 
tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCAmelCase ) _lowerCAmelCase =model_class.from_pretrained(__UpperCAmelCase , output_loading_info=__UpperCAmelCase ) self.assertEqual(info["""missing_keys"""] , [] ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*__UpperCAmelCase ) @unittest.skip(reason="""Model has no tokens embeddings""" ) def _lowerCAmelCase ( self ) -> Dict: pass def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =inspect.signature(getattr(__UpperCAmelCase , """forward""" ) ) # The main input is the name of the argument after `self` _lowerCAmelCase =list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase =model_class(__UpperCAmelCase ) _lowerCAmelCase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase =[*signature.parameters.keys()] _lowerCAmelCase =[ """past_values""", """past_time_features""", """past_observed_mask""", """static_categorical_features""", """static_real_features""", """future_values""", """future_time_features""", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("""future_observed_mask""" ) expected_arg_names.extend( [ """decoder_attention_mask""", """head_mask""", """decoder_head_mask""", """cross_attn_head_mask""", """encoder_outputs""", """past_key_values""", """output_hidden_states""", """output_attentions""", """use_cache""", """return_dict""", ] ) self.assertListEqual(arg_names[: len(__UpperCAmelCase )] , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase =True _lowerCAmelCase =getattr(self.model_tester , """seq_length""" , __UpperCAmelCase ) _lowerCAmelCase =getattr(self.model_tester , """decoder_seq_length""" , __UpperCAmelCase ) _lowerCAmelCase =getattr(self.model_tester , """encoder_seq_length""" , __UpperCAmelCase ) _lowerCAmelCase =getattr(self.model_tester , """d_model""" , __UpperCAmelCase ) _lowerCAmelCase =getattr(self.model_tester , """num_attention_heads""" , __UpperCAmelCase ) _lowerCAmelCase =d_model // num_attention_heads for model_class in self.all_model_classes: _lowerCAmelCase =True _lowerCAmelCase =False _lowerCAmelCase =True _lowerCAmelCase =model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase =model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) _lowerCAmelCase =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _lowerCAmelCase =True _lowerCAmelCase =model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase =model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) _lowerCAmelCase =outputs.encoder_attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, 
dim] , ) _lowerCAmelCase =len(__UpperCAmelCase ) _lowerCAmelCase =7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) # decoder attentions _lowerCAmelCase =outputs.decoder_attentions self.assertIsInstance(__UpperCAmelCase , (list, tuple) ) self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions _lowerCAmelCase =outputs.cross_attentions self.assertIsInstance(__UpperCAmelCase , (list, tuple) ) self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine _lowerCAmelCase =True _lowerCAmelCase =True _lowerCAmelCase =model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase =model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(out_len + 2 , len(__UpperCAmelCase ) ) _lowerCAmelCase =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def _lowerCAmelCase ( self ) -> Optional[int]: super().test_retain_grad_hidden_states_attentions() def _lowerCamelCase(__UpperCamelCase="train-batch.pt" ) -> int: _lowerCAmelCase =hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=A__ , repo_type="""dataset""" ) _lowerCAmelCase =torch.load(A__ , map_location=A__ ) return batch @require_torch @slow class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCAmelCase ) _lowerCAmelCase =prepare_batch() with torch.no_grad(): _lowerCAmelCase =model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0] _lowerCAmelCase =torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) _lowerCAmelCase =torch.tensor( [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=__UpperCAmelCase ) self.assertTrue(torch.allclose(output[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCAmelCase ) _lowerCAmelCase =prepare_batch("""val-batch.pt""" ) with torch.no_grad(): _lowerCAmelCase =model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , 
past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state _lowerCAmelCase =torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , __UpperCAmelCase ) _lowerCAmelCase =torch.tensor( [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=__UpperCAmelCase ) self.assertTrue(torch.allclose(output[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCAmelCase ) _lowerCAmelCase =prepare_batch("""val-batch.pt""" ) with torch.no_grad(): _lowerCAmelCase =model.generate( static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , ) _lowerCAmelCase =torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , __UpperCAmelCase ) _lowerCAmelCase =torch.tensor([31_30.67_63, 40_56.52_93, 70_53.07_86] , device=__UpperCAmelCase ) _lowerCAmelCase =outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __UpperCAmelCase , rtol=1e-1 ) )
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''cvt''' def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) _lowerCAmelCase =num_channels _lowerCAmelCase =patch_sizes _lowerCAmelCase =patch_stride _lowerCAmelCase =patch_padding _lowerCAmelCase =embed_dim _lowerCAmelCase =num_heads _lowerCAmelCase =depth _lowerCAmelCase =mlp_ratio _lowerCAmelCase =attention_drop_rate _lowerCAmelCase =drop_rate _lowerCAmelCase =drop_path_rate _lowerCAmelCase =qkv_bias _lowerCAmelCase =cls_token _lowerCAmelCase =qkv_projection_method _lowerCAmelCase =kernel_qkv _lowerCAmelCase =padding_kv _lowerCAmelCase =stride_kv _lowerCAmelCase =padding_q _lowerCAmelCase =stride_q _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=sys.maxsize ) -> List[str]: _lowerCAmelCase ='''bilinear''' _lowerCAmelCase =max_size _lowerCAmelCase =short_edge_length def __call__( self , __UpperCAmelCase ) -> Dict: _lowerCAmelCase =[] for img in imgs: _lowerCAmelCase =img.shape[:2] # later: provide list and randomly choose index for resize _lowerCAmelCase =np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img _lowerCAmelCase =size * 1.0 / min(snake_case_ , snake_case_ ) if h < w: _lowerCAmelCase =size, scale * w else: _lowerCAmelCase =scale * h, size if max(snake_case_ , snake_case_ ) > self.max_size: _lowerCAmelCase =self.max_size * 1.0 / max(snake_case_ , snake_case_ ) _lowerCAmelCase =newh * scale _lowerCAmelCase =neww * scale _lowerCAmelCase =int(neww + 0.5 ) _lowerCAmelCase =int(newh + 0.5 ) if img.dtype == np.uinta: _lowerCAmelCase =Image.fromarray(snake_case_ ) _lowerCAmelCase =pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) _lowerCAmelCase =np.asarray(snake_case_ ) else: _lowerCAmelCase =img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw _lowerCAmelCase =nn.functional.interpolate( snake_case_ , (newh, neww) , mode=self.interp_method , align_corners=snake_case_ ).squeeze(0 ) img_augs.append(snake_case_ ) return img_augs class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> Optional[int]: _lowerCAmelCase =ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) _lowerCAmelCase =cfg.INPUT.FORMAT _lowerCAmelCase =cfg.SIZE_DIVISIBILITY _lowerCAmelCase =cfg.PAD_VALUE _lowerCAmelCase =cfg.INPUT.MAX_SIZE_TEST _lowerCAmelCase =cfg.MODEL.DEVICE _lowerCAmelCase =torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) _lowerCAmelCase =torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) _lowerCAmelCase =lambda __UpperCAmelCase : (x - self.pixel_mean) / self.pixel_std def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Dict: _lowerCAmelCase =tuple(max(snake_case_ ) for s in zip(*[img.shape for img in images] ) ) _lowerCAmelCase =[im.shape[-2:] for im in images] _lowerCAmelCase =[ nn.functional.pad( snake_case_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(snake_case_ , snake_case_ ) ] return torch.stack(snake_case_ ), torch.tensor(snake_case_ ) def __call__( self , __UpperCAmelCase , __UpperCAmelCase=False ) -> List[str]: with torch.no_grad(): if not isinstance(snake_case_ , snake_case_ ): _lowerCAmelCase =[images] if single_image: assert len(snake_case_ ) == 1 for i in range(len(snake_case_ ) ): if isinstance(images[i] , torch.Tensor ): images.insert(snake_case_ , images.pop(snake_case_ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( snake_case_ , torch.as_tensor(img_tensorize(images.pop(snake_case_ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge _lowerCAmelCase =torch.tensor([im.shape[:2] for im in images] ) _lowerCAmelCase =self.aug(snake_case_ ) # transpose images and convert to torch tensors # images = 
[torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic _lowerCAmelCase =[self.normalizer(snake_case_ ) for x in images] # now pad them to do the following operations _lowerCAmelCase =self.pad(snake_case_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad _lowerCAmelCase =torch.true_divide(snake_case_ , snake_case_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> str: assert torch.isfinite(lowerCAmelCase__ ).all(), "Box tensor contains infinite or NaN!" _lowerCAmelCase =box_size tensor[:, 0].clamp_(min=0 , max=lowerCAmelCase__ ) tensor[:, 1].clamp_(min=0 , max=lowerCAmelCase__ ) tensor[:, 2].clamp_(min=0 , max=lowerCAmelCase__ ) tensor[:, 3].clamp_(min=0 , max=lowerCAmelCase__ )
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = ['''image_processor''', '''tokenizer'''] lowerCamelCase = '''CLIPImageProcessor''' lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''') def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase =None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __UpperCAmelCase , ) _lowerCAmelCase =kwargs.pop("""feature_extractor""" ) _lowerCAmelCase =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if images is not None: _lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: _lowerCAmelCase =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =self.tokenizer.model_input_names _lowerCAmelCase =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =[ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =[ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =[ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =[ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] self.assertTrue(is_safetensors_compatible(__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =[ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", # Removed: 'text_encoder/model.safetensors', """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertFalse(is_safetensors_compatible(__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =[ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] _lowerCAmelCase ="""fp16""" self.assertTrue(is_safetensors_compatible(__UpperCAmelCase , variant=__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =[ """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] _lowerCAmelCase ="""fp16""" self.assertTrue(is_safetensors_compatible(__UpperCAmelCase , variant=__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =[ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] _lowerCAmelCase ="""fp16""" self.assertTrue(is_safetensors_compatible(__UpperCAmelCase , variant=__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =[ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _lowerCAmelCase 
="""fp16""" self.assertFalse(is_safetensors_compatible(__UpperCAmelCase , variant=__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =[ """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", ] _lowerCAmelCase ="""fp16""" self.assertTrue(is_safetensors_compatible(__UpperCAmelCase , variant=__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =[ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] _lowerCAmelCase ="""fp16""" self.assertTrue(is_safetensors_compatible(__UpperCAmelCase , variant=__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =[ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", # 'text_encoder/model.fp16.safetensors', """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] _lowerCAmelCase ="""fp16""" self.assertFalse(is_safetensors_compatible(__UpperCAmelCase , variant=__UpperCAmelCase ) )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['PerceiverFeatureExtractor'] __A = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class MBart50Tokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
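# A minimal usage sketch (added; it assumes a local SentencePiece model file,
# which is not shipped with this module). With the source-language special
# tokens set, encoding wraps the text as [src_lang_code] + tokens + [eos]:
#
#   tokenizer = MBart50Tokenizer("sentencepiece.bpe.model", src_lang="en_XX", tgt_lang="ro_RO")
#   model_inputs = tokenizer("Hello world", return_tensors="pt")
#   # model_inputs.input_ids[0] starts with the en_XX language-code id and ends with eos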
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Swinv2ForImageClassification', 'Swinv2ForMaskedImageModeling', 'Swinv2Model', 'Swinv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" __A = 9.80_665 def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = g ) -> Any: if fluid_density <= 0: raise ValueError("""Impossible fluid density""" ) if volume < 0: raise ValueError("""Impossible Object volume""" ) if gravity <= 0: raise ValueError("""Impossible Gravity""" ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple: if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" ) _lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" ) _lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" ) _lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" ) _lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item _lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" ) _lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" ) _lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]: assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _lowerCAmelCase =old_checkpoint[path] _lowerCAmelCase =old_tensor.shape[0] // 3 _lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1) _lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3 _lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 ) _lowerCAmelCase =query.reshape(__UpperCamelCase ) _lowerCAmelCase =key.reshape(__UpperCamelCase ) _lowerCAmelCase =value.reshape(__UpperCamelCase ) for path in paths: _lowerCAmelCase =path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0] else: _lowerCAmelCase =old_checkpoint[path["""old"""]] def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase ={} _lowerCAmelCase =checkpoint["""time_embed.0.weight"""] _lowerCAmelCase =checkpoint["""time_embed.0.bias"""] _lowerCAmelCase =checkpoint["""time_embed.2.weight"""] _lowerCAmelCase =checkpoint["""time_embed.2.bias"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""] _lowerCAmelCase =checkpoint["""out.0.weight"""] _lowerCAmelCase =checkpoint["""out.0.bias"""] _lowerCAmelCase =checkpoint["""out.2.weight"""] _lowerCAmelCase =checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the middle blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the output blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } for i in range(1 , __UpperCamelCase ): _lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: _lowerCAmelCase =checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] _lowerCAmelCase =checkpoint[ 
F'''input_blocks.{i}.0.op.bias''' ] continue _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} _lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase ) if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''input_blocks.{i}.1''', """new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''input_blocks.{i}.1.qkv.bias''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , ) _lowerCAmelCase =middle_blocks[0] _lowerCAmelCase =middle_blocks[1] _lowerCAmelCase =middle_blocks[2] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase ) for i in range(__UpperCamelCase ): _lowerCAmelCase =i // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =i % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]] _lowerCAmelCase ={} for layer in output_block_layers: _lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__UpperCamelCase ) else: _lowerCAmelCase =[layer_name] if len(__UpperCamelCase ) > 1: _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(__UpperCamelCase ) == 2: _lowerCAmelCase =[] if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''output_blocks.{i}.1''', """new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''output_blocks.{i}.1.qkv.bias''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , ) else: _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] ) _lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] ) _lowerCAmelCase =checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') __A = parser.parse_args() __A = torch.load(args.checkpoint_path) with open(args.config_file) as f: __A = json.loads(f.read()) __A = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __A = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) __A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) __A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: if p < 2: raise ValueError("""p should not be less than 2!""" ) elif p == 2: return True _lowerCAmelCase =4 _lowerCAmelCase =(1 << p) - 1 for _ in range(p - 2 ): _lowerCAmelCase =((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(11))
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =0 _lowerCAmelCase =len(__UpperCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , __UpperCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: if len(__UpperCamelCase ) <= 1: return arr, 0 _lowerCAmelCase =len(__UpperCamelCase ) // 2 _lowerCAmelCase =arr[0:mid] _lowerCAmelCase =arr[mid:] _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =inversion_p + inversions_q + cross_inversions return c, num_inversions def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: _lowerCAmelCase =[] _lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0 while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__UpperCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__UpperCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _lowerCamelCase() -> str: _lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , __UpperCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) # an empty list should also have zero inversions _lowerCAmelCase =[] _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) if __name__ == "__main__": main()
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=32 * 8 , __UpperCAmelCase=32 * 8 , __UpperCAmelCase=4 , __UpperCAmelCase=64 , ) -> Optional[int]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =is_training _lowerCAmelCase =use_auxiliary_loss _lowerCAmelCase =num_queries _lowerCAmelCase =num_channels _lowerCAmelCase =min_size _lowerCAmelCase =max_size _lowerCAmelCase =num_labels _lowerCAmelCase =hidden_dim _lowerCAmelCase =hidden_dim def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( _A ) _lowerCAmelCase =torch.ones([self.batch_size, self.min_size, self.max_size] , device=_A ) _lowerCAmelCase =( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_A ) > 0.5 ).float() _lowerCAmelCase =(torch.rand((self.batch_size, self.num_labels) , device=_A ) > 0.5).long() _lowerCAmelCase =self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =MaskaFormerConfig( hidden_size=self.hidden_dim , ) _lowerCAmelCase =self.num_queries _lowerCAmelCase =self.num_labels _lowerCAmelCase =[1, 1, 1, 1] _lowerCAmelCase =self.num_channels _lowerCAmelCase =64 _lowerCAmelCase =1_28 _lowerCAmelCase =self.hidden_dim _lowerCAmelCase =self.hidden_dim _lowerCAmelCase =self.hidden_dim return config def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase ={'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: _lowerCAmelCase =output.encoder_hidden_states _lowerCAmelCase =output.pixel_decoder_hidden_states _lowerCAmelCase =output.transformer_decoder_hidden_states self.parent.assertTrue(len(_A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_A ) , config.decoder_layers ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> List[str]: with torch.no_grad(): _lowerCAmelCase =MaskaFormerModel(config=_A ) model.to(_A ) model.eval() _lowerCAmelCase =model(pixel_values=_A , pixel_mask=_A ) _lowerCAmelCase =model(_A , output_hidden_states=_A ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists 
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(_A , _A ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: _lowerCAmelCase =MaskaFormerForUniversalSegmentation(config=_A ) model.to(_A ) model.eval() def comm_check_on_output(__UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCAmelCase =model(pixel_values=_A , pixel_mask=_A ) _lowerCAmelCase =model(_A ) comm_check_on_output(_A ) _lowerCAmelCase =model( pixel_values=_A , pixel_mask=_A , mask_labels=_A , class_labels=_A ) comm_check_on_output(_A ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () lowerCamelCase = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {} lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =MaskaFormerModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=_A , has_text_modality=_A ) def _lowerCAmelCase ( self ) -> Any: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_A , **_A , output_hidden_states=_A ) def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_A ) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""" ) def _lowerCAmelCase ( self ) -> List[Any]: pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" ) def _lowerCAmelCase ( self ) -> List[Any]: pass @unittest.skip(reason="""Mask2Former is not a generative model""" ) def _lowerCAmelCase ( self ) -> Optional[int]: pass @unittest.skip(reason="""Mask2Former does not use token embeddings""" ) def _lowerCAmelCase ( self ) -> str: pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""" ) def _lowerCAmelCase ( self ) -> Tuple: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _lowerCAmelCase ( self ) -> List[str]: pass def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: _lowerCAmelCase =model_class(_A ) _lowerCAmelCase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase =[*signature.parameters.keys()] _lowerCAmelCase =['pixel_values'] self.assertListEqual(arg_names[:1] , _A ) @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: _lowerCAmelCase =MaskaFormerModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =(self.model_tester.min_size,) * 2 _lowerCAmelCase ={ 'pixel_values': torch.randn((2, 3, *size) , device=_A ), 'mask_labels': torch.randn((2, 10, *size) , device=_A ), 'class_labels': torch.zeros(2 , 10 , device=_A ).long(), } _lowerCAmelCase =self.model_tester.get_config() _lowerCAmelCase =MaskaFormerForUniversalSegmentation(_A ).to(_A ) _lowerCAmelCase =model(**_A ) self.assertTrue(outputs.loss is not None ) def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_A , **_A , output_hidden_states=_A ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase =model_class(_A ).to(_A ) _lowerCAmelCase =model(**_A , output_attentions=_A ) self.assertTrue(outputs.attentions is not None ) def _lowerCAmelCase ( self ) -> Union[str, Any]: if not self.model_tester.is_training: return _lowerCAmelCase =self.all_model_classes[1] _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() _lowerCAmelCase =model_class(_A ) model.to(_A ) model.train() _lowerCAmelCase =model(_A , mask_labels=_A , class_labels=_A ).loss loss.backward() def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.all_model_classes[1] _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() _lowerCAmelCase =True _lowerCAmelCase =True _lowerCAmelCase =model_class(_A ).to(_A ) model.train() _lowerCAmelCase =model(_A , mask_labels=_A , class_labels=_A ) _lowerCAmelCase =outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCAmelCase =outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() _lowerCAmelCase =outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCAmelCase =outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=_A ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1E-4 def _lowerCamelCase() -> List[Any]: _lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase ( self ) -> Any: return "facebook/mask2former-swin-small-coco-instance" @cached_property def _lowerCAmelCase ( self ) -> Any: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_A ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(_A , return_tensors="""pt""" ).to(_A ) 
_lowerCAmelCase =inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_A , (1, 3, 3_84, 3_84) ) with torch.no_grad(): _lowerCAmelCase =model(**_A ) _lowerCAmelCase =torch.tensor( [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_A ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A ) ) _lowerCAmelCase =torch.tensor( [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_A ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A ) ) _lowerCAmelCase =torch.tensor( [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_A ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _A , atol=_A ) ) def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_A ).eval() _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(_A , return_tensors="""pt""" ).to(_A ) _lowerCAmelCase =inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_A , (1, 3, 3_84, 3_84) ) with torch.no_grad(): _lowerCAmelCase =model(**_A ) # masks_queries_logits _lowerCAmelCase =outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) _lowerCAmelCase =[ [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1], [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1], [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5], ] _lowerCAmelCase =torch.tensor(_A ).to(_A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _A , atol=_A ) ) # class_queries_logits _lowerCAmelCase =outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) _lowerCAmelCase =torch.tensor( [ [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2], [0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3], [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5], ] ).to(_A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _A , atol=_A ) ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_A ).eval() _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , ) _lowerCAmelCase =inputs['pixel_values'].to(_A ) _lowerCAmelCase =[el.to(_A ) for el in inputs['mask_labels']] _lowerCAmelCase =[el.to(_A ) for el in inputs['class_labels']] with torch.no_grad(): _lowerCAmelCase =model(**_A ) self.assertTrue(outputs.loss is not None )
"""simple docstring""" import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = True lowerCamelCase = None lowerCamelCase = 1 lowerCamelCase = None lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None def _lowerCAmelCase ( self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
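# Added worked example: the function solves the mass-action law n * p = n_i**2
# for whichever of the three concentrations is passed as 0:
#
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   # -> ('intrinsic_conc', 50.0), since n_i = sqrt(25 * 100)
#   carrier_concentration(electron_conc=0, hole_conc=100, intrinsic_conc=50)
#   # -> ('electron_conc', 25.0)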
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int: return int((input_a, input_a).count(1 ) != 0 ) def _lowerCamelCase() -> None: assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def _lowerCamelCase(__UpperCamelCase ) -> Optional[int]: _lowerCAmelCase ={} _lowerCAmelCase =job["""started_at"""] _lowerCAmelCase =job["""completed_at"""] _lowerCAmelCase =date_parser.parse(lowerCAmelCase__ ) _lowerCAmelCase =date_parser.parse(lowerCAmelCase__ ) _lowerCAmelCase =round((end_datetime - start_datetime).total_seconds() / 60.0 ) _lowerCAmelCase =start _lowerCAmelCase =end _lowerCAmelCase =duration_in_min return job_info def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=None ) -> str: _lowerCAmelCase =None if token is not None: _lowerCAmelCase ={"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''} _lowerCAmelCase =F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' _lowerCAmelCase =requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ ).json() _lowerCAmelCase ={} try: job_time.update({job["""name"""]: extract_time_from_single_job(lowerCAmelCase__ ) for job in result["""jobs"""]} ) _lowerCAmelCase =math.ceil((result["""total_count"""] - 100) / 100 ) for i in range(lowerCAmelCase__ ): _lowerCAmelCase =requests.get(url + F'''&page={i + 2}''' , headers=lowerCAmelCase__ ).json() job_time.update({job["""name"""]: extract_time_from_single_job(lowerCAmelCase__ ) for job in result["""jobs"""]} ) return job_time except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') __A = parser.parse_args() __A = get_job_time(args.workflow_run_id) __A = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F"""{k}: {v["duration"]}""")
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py __A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' __A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' __A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple: _lowerCAmelCase =compute_bleu( reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
341
0
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __A = logging.get_logger(__name__) class lowerCamelCase__ ( __a ): '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> Dict: super().__init__() _lowerCAmelCase =nn.ModuleList(a__ ) def __UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = True , ) -> Optional[int]: for i, (image, scale, controlnet) in enumerate(zip(a__ , a__ , self.nets ) ): _lowerCAmelCase , _lowerCAmelCase =controlnet( a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) # merge samples if i == 0: _lowerCAmelCase , _lowerCAmelCase =down_samples, mid_sample else: _lowerCAmelCase =[ samples_prev + samples_curr for samples_prev, samples_curr in zip(a__ , a__ ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Optional[int]: _lowerCAmelCase =0 _lowerCAmelCase =save_directory for controlnet in self.nets: controlnet.save_pretrained( a__ , is_main_process=a__ , save_function=a__ , safe_serialization=a__ , variant=a__ , ) idx += 1 _lowerCAmelCase =model_path_to_save + f'''_{idx}''' @classmethod def __UpperCAmelCase ( cls , __UpperCAmelCase , **__UpperCAmelCase ) -> str: _lowerCAmelCase =0 _lowerCAmelCase =[] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _lowerCAmelCase =pretrained_model_path while os.path.isdir(a__ ): _lowerCAmelCase =ControlNetModel.from_pretrained(a__ , **a__ ) controlnets.append(a__ ) idx += 1 _lowerCAmelCase =pretrained_model_path + f'''_{idx}''' logger.info(f'''{len(a__ )} controlnets loaded from {pretrained_model_path}.''' ) if len(a__ ) == 0: raise ValueError( f'''No ControlNets found under {os.path.dirname(a__ )}. Expected at least {pretrained_model_path + '_0'}.''' ) return cls(a__ )
360
"""simple docstring""" import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=512, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def _lowerCamelCase(__UpperCamelCase ) -> List[str]: if string == "True": return True elif string == "False": return False else: raise ValueError(F'''could not parse string as bool {string}''' ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) __A = parser.parse_args() __A = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
341
0
"""simple docstring""" import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) __A = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", } def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: _lowerCAmelCase ={} state_dict.pop("""pixel_mean""" , __UpperCamelCase ) state_dict.pop("""pixel_std""" , __UpperCamelCase ) _lowerCAmelCase =r'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*''' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _lowerCAmelCase =key.replace(__UpperCamelCase , __UpperCamelCase ) if re.match(__UpperCamelCase , __UpperCamelCase ): _lowerCAmelCase =int(re.match(__UpperCamelCase , __UpperCamelCase ).group(2 ) ) if layer_nb == 0: _lowerCAmelCase =key.replace("""layers.0""" , """proj_in""" ) elif layer_nb == 1: _lowerCAmelCase =key.replace("""layers.1""" , """layers.0""" ) elif layer_nb == 2: _lowerCAmelCase =key.replace("""layers.2""" , """proj_out""" ) _lowerCAmelCase =value _lowerCAmelCase =model_state_dict[ '''prompt_encoder.shared_embedding.positional_embedding''' ] return model_state_dict def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="ybelkada/segment-anything" ) -> Optional[int]: _lowerCAmelCase =hf_hub_download(__UpperCamelCase , F'''checkpoints/{model_name}.pth''' ) if "sam_vit_b" in model_name: _lowerCAmelCase =SamConfig() elif "sam_vit_l" in model_name: _lowerCAmelCase =SamVisionConfig( hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) _lowerCAmelCase =SamConfig( vision_config=__UpperCamelCase , ) elif "sam_vit_h" in model_name: _lowerCAmelCase =SamVisionConfig( hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) _lowerCAmelCase =SamConfig( vision_config=__UpperCamelCase , ) _lowerCAmelCase =torch.load(__UpperCamelCase , map_location="""cpu""" ) _lowerCAmelCase =replace_keys(__UpperCamelCase ) _lowerCAmelCase =SamImageProcessor() _lowerCAmelCase =SamProcessor(image_processor=__UpperCamelCase ) _lowerCAmelCase =SamModel(__UpperCamelCase ) hf_model.load_state_dict(__UpperCamelCase ) _lowerCAmelCase =hf_model.to("""cuda""" ) _lowerCAmelCase ='''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png''' _lowerCAmelCase =Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ).convert("""RGB""" ) 
_lowerCAmelCase =[[[400, 650]]] _lowerCAmelCase =[[1]] _lowerCAmelCase =processor(images=np.array(__UpperCamelCase ) , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): _lowerCAmelCase =hf_model(**__UpperCamelCase ) _lowerCAmelCase =output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_79_89_02_51_15_96_68 _lowerCAmelCase =processor( images=np.array(__UpperCamelCase ) , input_points=__UpperCamelCase , input_labels=__UpperCamelCase , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): _lowerCAmelCase =hf_model(**__UpperCamelCase ) _lowerCAmelCase =output.iou_scores.squeeze() assert scores[-1].item() == 0.97_12_60_30_92_19_36_04 _lowerCAmelCase =((75, 275, 1725, 850),) _lowerCAmelCase =processor(images=np.array(__UpperCamelCase ) , input_boxes=__UpperCamelCase , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): _lowerCAmelCase =hf_model(**__UpperCamelCase ) _lowerCAmelCase =output.iou_scores.squeeze() assert scores[-1].item() == 0.86_86_01_56_05_92_65_14 # Test with 2 points and 1 image. _lowerCAmelCase =[[[400, 650], [800, 650]]] _lowerCAmelCase =[[1, 1]] _lowerCAmelCase =processor( images=np.array(__UpperCamelCase ) , input_points=__UpperCamelCase , input_labels=__UpperCamelCase , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): _lowerCAmelCase =hf_model(**__UpperCamelCase ) _lowerCAmelCase =output.iou_scores.squeeze() assert scores[-1].item() == 0.99_36_04_77_92_43_46_92 if __name__ == "__main__": __A = argparse.ArgumentParser() __A = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"] parser.add_argument( '--model_name', default='sam_vit_h_4b8939', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) parser.add_argument( '--model_hub_id', default='ybelkada/segment-anything', choices=choices, type=str, help='Path to hf config.json of model to convert', ) __A = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
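# A minimal inference sketch once converted weights exist (the repo id below is a
# placeholder for wherever the converted checkpoint was pushed):
#
#     processor = SamProcessor.from_pretrained("facebook/sam-vit-huge")
#     model = SamModel.from_pretrained("facebook/sam-vit-huge")
#     inputs = processor(images=np.array(raw_image), input_points=[[[400, 650]]], return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     # outputs.iou_scores ranks the predicted masks; higher is better.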
361
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __A = { 'configuration_audio_spectrogram_transformer': [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ASTConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ASTForAudioClassification', 'ASTModel', 'ASTPreTrainedModel', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['ASTFeatureExtractor'] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
0
"""simple docstring""" import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __A = logging.get_logger(__name__) __A = { 'microsoft/conditional-detr-resnet-50': ( 'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json' ), } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''conditional_detr''' lowerCamelCase = ['''past_key_values'''] lowerCamelCase = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=3 , __UpperCAmelCase=3_00 , __UpperCAmelCase=6 , __UpperCAmelCase=20_48 , __UpperCAmelCase=8 , __UpperCAmelCase=6 , __UpperCAmelCase=20_48 , __UpperCAmelCase=8 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=2_56 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1.0 , __UpperCAmelCase=False , __UpperCAmelCase="sine" , __UpperCAmelCase="resnet50" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=0.2_5 , **__UpperCAmelCase , ) -> int: if backbone_config is not None and use_timm_backbone: raise ValueError("""You can\'t specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) _lowerCAmelCase =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(__a , __a ): _lowerCAmelCase =backbone_config.get("""model_type""" ) _lowerCAmelCase =CONFIG_MAPPING[backbone_model_type] _lowerCAmelCase =config_class.from_dict(__a ) _lowerCAmelCase =use_timm_backbone _lowerCAmelCase =backbone_config _lowerCAmelCase =num_channels _lowerCAmelCase =num_queries _lowerCAmelCase =d_model _lowerCAmelCase =encoder_ffn_dim _lowerCAmelCase =encoder_layers _lowerCAmelCase =encoder_attention_heads _lowerCAmelCase =decoder_ffn_dim _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =activation_dropout _lowerCAmelCase =activation_function _lowerCAmelCase =init_std _lowerCAmelCase =init_xavier_std _lowerCAmelCase =encoder_layerdrop _lowerCAmelCase =decoder_layerdrop _lowerCAmelCase =encoder_layers _lowerCAmelCase =auxiliary_loss _lowerCAmelCase =position_embedding_type _lowerCAmelCase =backbone _lowerCAmelCase =use_pretrained_backbone _lowerCAmelCase =dilation # Hungarian matcher _lowerCAmelCase =class_cost _lowerCAmelCase =bbox_cost _lowerCAmelCase =giou_cost # Loss coefficients _lowerCAmelCase =mask_loss_coefficient _lowerCAmelCase =dice_loss_coefficient _lowerCAmelCase =cls_loss_coefficient _lowerCAmelCase =bbox_loss_coefficient _lowerCAmelCase =giou_loss_coefficient _lowerCAmelCase =focal_alpha super().__init__(is_encoder_decoder=__a , **__a ) @property def _lowerCAmelCase ( self ) -> int: return self.encoder_attention_heads @property def _lowerCAmelCase ( self ) -> int: return self.d_model def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: _lowerCAmelCase =self.backbone_config.to_dict() _lowerCAmelCase =self.__class__.model_type return output class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = version.parse('''1.11''' ) @property def _lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def _lowerCAmelCase ( self ) -> float: return 1e-5 @property def _lowerCAmelCase ( self ) -> int: return 12
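# A quick sanity-check sketch for the config class above (follows directly from
# the properties and `to_dict` just defined):
#
#     config = ConditionalDetrConfig(num_queries=100, d_model=256)
#     assert config.hidden_size == 256          # property aliasing d_model
#     assert config.num_attention_heads == 8    # property aliasing encoder_attention_heads
#     assert config.to_dict()["model_type"] == "conditional_detr"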
362
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { 'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'], 'tokenization_m2m_100': ['M2M100Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST', 'M2M100ForConditionalGeneration', 'M2M100Model', 'M2M100PreTrainedModel', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
0
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase = 1000 ) -> int: return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
363
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __A = datasets.logging.get_logger(__name__) __A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' __A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' __A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict: _lowerCAmelCase ={doc: key_lines} _lowerCAmelCase ={doc: sys_lines} _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) if remove_nested: _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' 
"""files, respectively""" ) return doc_coref_infos def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: _lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 for name, metric in metrics: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _lowerCAmelCase =(conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''' ) output_scores.update({"""conll_score""": conll} ) return output_scores def _lowerCamelCase(__UpperCamelCase ) -> Tuple: _lowerCAmelCase =False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: _lowerCAmelCase =line.split()[5] if not parse_col == "-": _lowerCAmelCase =True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]: _lowerCAmelCase =[ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _lowerCAmelCase =evaluate( key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , ) return score
341
0
"""simple docstring""" import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __A = logging.get_logger(__name__) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None ) -> int: # Recurse if needed if "." in tensor_name: _lowerCAmelCase =tensor_name.split(""".""" ) for split in splits[:-1]: _lowerCAmelCase =getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) _lowerCAmelCase =new_module _lowerCAmelCase =splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) _lowerCAmelCase =tensor_name in module._buffers _lowerCAmelCase =getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None: raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) _lowerCAmelCase =False _lowerCAmelCase =False if is_buffer or not is_bitsandbytes_available(): _lowerCAmelCase =False _lowerCAmelCase =False else: _lowerCAmelCase =hasattr(bnb.nn , """Params4bit""" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) _lowerCAmelCase =isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: _lowerCAmelCase =module._parameters[tensor_name] if param.device.type != "cuda": if value is None: _lowerCAmelCase =old_value.to(SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): _lowerCAmelCase =value.to("""cpu""" ) if value.dtype == torch.inta: _lowerCAmelCase =version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse( """0.37.2""" ) if not is_abit_serializable: raise ValueError( """Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """ """Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" ) else: _lowerCAmelCase =torch.tensor(SCREAMING_SNAKE_CASE_ , device="""cpu""" ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , SCREAMING_SNAKE_CASE_ ) and fpaa_statistics is None: _lowerCAmelCase =new_value.T _lowerCAmelCase =old_value.__dict__ if is_abit: _lowerCAmelCase =bnb.nn.IntaParams(SCREAMING_SNAKE_CASE_ , requires_grad=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) elif is_abit: _lowerCAmelCase =bnb.nn.Paramsabit(SCREAMING_SNAKE_CASE_ , requires_grad=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =new_value if fpaa_statistics is not None: setattr(module.weight , """SCB""" , fpaa_statistics.to(SCREAMING_SNAKE_CASE_ ) ) else: if value is None: _lowerCAmelCase =old_value.to(SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): _lowerCAmelCase =value.to(SCREAMING_SNAKE_CASE_ ) else: _lowerCAmelCase =torch.tensor(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ ) if is_buffer: _lowerCAmelCase =new_value else: _lowerCAmelCase =nn.Parameter(SCREAMING_SNAKE_CASE_ , requires_grad=old_value.requires_grad ) _lowerCAmelCase =new_value def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=False ) -> Any: for name, module in model.named_children(): if current_key_name is None: _lowerCAmelCase =[] current_key_name.append(SCREAMING_SNAKE_CASE_ ) if (isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) or isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in """.""".join(SCREAMING_SNAKE_CASE_ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase , _lowerCAmelCase =module.weight.shape else: _lowerCAmelCase =module.in_features _lowerCAmelCase =module.out_features if quantization_config.quantization_method() == "llm_int8": _lowerCAmelCase =bnb.nn.LinearabitLt( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) _lowerCAmelCase =True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: _lowerCAmelCase =bnb.nn.Linearabit( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) _lowerCAmelCase =True # Store the module class in case we need to transpose the weight later _lowerCAmelCase =type(SCREAMING_SNAKE_CASE_ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(SCREAMING_SNAKE_CASE_ ) if len(list(module.children() ) ) > 0: _lowerCAmelCase , _lowerCAmelCase =_replace_with_bnb_linear( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , has_been_replaced=SCREAMING_SNAKE_CASE_ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]: _lowerCAmelCase =["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert _lowerCAmelCase , _lowerCAmelCase =_replace_with_bnb_linear( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _lowerCamelCase(*__UpperCamelCase , **__UpperCamelCase ) -> Optional[Any]: warnings.warn( """`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" , SCREAMING_SNAKE_CASE_ , ) return replace_with_bnb_linear(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _lowerCamelCase(*__UpperCamelCase , **__UpperCamelCase ) -> Optional[int]: warnings.warn( """`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" , SCREAMING_SNAKE_CASE_ , ) return set_module_quantized_tensor_to_device(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _lowerCamelCase(__UpperCamelCase ) -> Optional[int]: _lowerCAmelCase =deepcopy(SCREAMING_SNAKE_CASE_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() _lowerCAmelCase =find_tied_parameters(SCREAMING_SNAKE_CASE_ ) # For compatibility with Accelerate < 0.18 if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase =sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: _lowerCAmelCase =sum(SCREAMING_SNAKE_CASE_ , [] ) _lowerCAmelCase =len(SCREAMING_SNAKE_CASE_ ) > 0 # Check if it is a base model _lowerCAmelCase =not hasattr(SCREAMING_SNAKE_CASE_ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head _lowerCAmelCase =list(model.named_children() ) _lowerCAmelCase =[list_modules[-1][0]] # add last module together with tied weights _lowerCAmelCase =set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =list(set(SCREAMING_SNAKE_CASE_ ) ) + list(SCREAMING_SNAKE_CASE_ ) # remove ".weight" from the keys _lowerCAmelCase =[""".weight""", """.bias"""] _lowerCAmelCase =[] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: _lowerCAmelCase =name.replace(SCREAMING_SNAKE_CASE_ , """""" ) filtered_module_names.append(SCREAMING_SNAKE_CASE_ ) return filtered_module_names
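# A minimal end-to-end sketch of the replacement pass above. It only swaps the
# module classes; actually running the result needs a CUDA device plus the
# `bitsandbytes` package. `BitsAndBytesConfig` is assumed to be the transformers
# config object that provides `quantization_method()` and the `llm_int8_*`
# fields the code reads:
#
#     from transformers import BitsAndBytesConfig
#
#     toy = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
#     cfg = BitsAndBytesConfig(load_in_8bit=True)
#     toy = replace_with_bnb_linear(toy, quantization_config=cfg)
#     # every nn.Linear except modules named "lm_head" is now bnb.nn.Linear8bitLt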
364
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = XGLMConfig lowerCamelCase = {} lowerCamelCase = '''gelu''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_input_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =ffn_dim _lowerCAmelCase =activation_function _lowerCAmelCase =activation_dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =initializer_range _lowerCAmelCase =None _lowerCAmelCase =0 _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Dict: return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _lowerCAmelCase =None if self.use_input_mask: _lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase =self.get_config() _lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self ) -> str: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) =config_and_inputs _lowerCAmelCase ={ """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': 
TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =TFXGLMModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 ) def _lowerCAmelCase ( self ) -> int: self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def _lowerCAmelCase ( self ) -> Union[str, Any]: super().test_resize_token_embeddings() @require_tf class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) _lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) _lowerCAmelCase =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] ) _lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase ="""left""" # use different length sentences to test batching _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase ) _lowerCAmelCase =inputs["""input_ids"""] _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
341
0
"""simple docstring""" import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]: _lowerCAmelCase =torch.exp(snake_case__ ) _lowerCAmelCase =torch.sum(snake_case__ , dim=1 ) # sum of exp(x_i) _lowerCAmelCase =torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(snake_case__ ) - B / A class lowerCamelCase__ ( nn.Module ): '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> List[Any]: super().__init__() _lowerCAmelCase =config.output_attentions _lowerCAmelCase =config.output_hidden_states _lowerCAmelCase =nn.ModuleList([BertLayer(SCREAMING_SNAKE_CASE_ ) for _ in range(config.num_hidden_layers )] ) _lowerCAmelCase =nn.ModuleList([BertHighway(SCREAMING_SNAKE_CASE_ ) for _ in range(config.num_hidden_layers )] ) _lowerCAmelCase =[-1 for _ in range(config.num_hidden_layers )] def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Dict: if (type(SCREAMING_SNAKE_CASE_ ) is float) or (type(SCREAMING_SNAKE_CASE_ ) is int): for i in range(len(self.early_exit_entropy ) ): _lowerCAmelCase =x else: _lowerCAmelCase =x def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase =pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> List[Any]: _lowerCAmelCase =() _lowerCAmelCase =() _lowerCAmelCase =() for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: _lowerCAmelCase =all_hidden_states + (hidden_states,) _lowerCAmelCase =layer_module( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , head_mask[i] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =layer_outputs[0] if self.output_attentions: _lowerCAmelCase =all_attentions + (layer_outputs[1],) _lowerCAmelCase =(hidden_states,) if self.output_hidden_states: _lowerCAmelCase =current_outputs + (all_hidden_states,) if self.output_attentions: _lowerCAmelCase =current_outputs + (all_attentions,) _lowerCAmelCase =self.highway[i](SCREAMING_SNAKE_CASE_ ) # logits, pooled_output if not self.training: _lowerCAmelCase =highway_exit[0] _lowerCAmelCase =entropy(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy _lowerCAmelCase =all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: _lowerCAmelCase =(highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(SCREAMING_SNAKE_CASE_ , i + 1 ) else: _lowerCAmelCase =all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: _lowerCAmelCase =all_hidden_states + (hidden_states,) _lowerCAmelCase =(hidden_states,) if self.output_hidden_states: _lowerCAmelCase =outputs + (all_hidden_states,) if self.output_attentions: _lowerCAmelCase =outputs + (all_attentions,) _lowerCAmelCase =outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( '''The Bert Model transformer with early exiting (DeeBERT). 
''' , a__ , ) class lowerCamelCase__ ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> List[str]: super().__init__(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =config _lowerCAmelCase =BertEmbeddings(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =DeeBertEncoder(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =BertPooler(SCREAMING_SNAKE_CASE_ ) self.init_weights() def _lowerCAmelCase ( self ) -> Optional[Any]: self.encoder.init_highway_pooler(self.pooler ) def _lowerCAmelCase ( self ) -> Any: return self.embeddings.word_embeddings def _lowerCAmelCase ( self , __UpperCAmelCase ) -> int: _lowerCAmelCase =value def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Dict: for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(SCREAMING_SNAKE_CASE_ ) @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) def _lowerCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> int: if input_ids is not None and inputs_embeds is not None: raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" ) elif input_ids is not None: _lowerCAmelCase =input_ids.size() elif inputs_embeds is not None: _lowerCAmelCase =inputs_embeds.size()[:-1] else: raise ValueError("""You have to specify either input_ids or inputs_embeds""" ) _lowerCAmelCase =input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: _lowerCAmelCase =torch.ones(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ ) if encoder_attention_mask is None: _lowerCAmelCase =torch.ones(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ ) if token_type_ids is None: _lowerCAmelCase =torch.zeros(SCREAMING_SNAKE_CASE_ , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
_lowerCAmelCase =self.get_extended_attention_mask(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: _lowerCAmelCase =encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: _lowerCAmelCase =encoder_attention_mask[:, None, None, :] _lowerCAmelCase =encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility _lowerCAmelCase =(1.0 - encoder_extended_attention_mask) * -1_00_00.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] _lowerCAmelCase =self.get_head_mask(SCREAMING_SNAKE_CASE_ , self.config.num_hidden_layers ) _lowerCAmelCase =self.embeddings( input_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =self.encoder( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , ) _lowerCAmelCase =encoder_outputs[0] _lowerCAmelCase =self.pooler(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class lowerCamelCase__ ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: _lowerCAmelCase =message _lowerCAmelCase =exit_layer # start from 1! class lowerCamelCase__ ( nn.Module ): '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> Dict: super().__init__() _lowerCAmelCase =BertPooler(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =nn.Dropout(config.hidden_dropout_prob ) _lowerCAmelCase =nn.Linear(config.hidden_size , config.num_labels ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Dict: # Pooler _lowerCAmelCase =encoder_outputs[0] _lowerCAmelCase =self.pooler(SCREAMING_SNAKE_CASE_ ) # "return" pooler_output # BertModel _lowerCAmelCase =(pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification _lowerCAmelCase =bmodel_output[1] _lowerCAmelCase =self.dropout(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =self.classifier(SCREAMING_SNAKE_CASE_ ) return logits, pooled_output @add_start_docstrings( '''Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. 
''' , a__ , ) class lowerCamelCase__ ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> Any: super().__init__(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =config.num_labels _lowerCAmelCase =config.num_hidden_layers _lowerCAmelCase =DeeBertModel(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =nn.Dropout(config.hidden_dropout_prob ) _lowerCAmelCase =nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) def _lowerCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=-1 , __UpperCAmelCase=False , ) -> Optional[int]: _lowerCAmelCase =self.num_layers try: _lowerCAmelCase =self.bert( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_ , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits _lowerCAmelCase =outputs[1] _lowerCAmelCase =self.dropout(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =self.classifier(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =(logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _lowerCAmelCase =e.message _lowerCAmelCase =e.exit_layer _lowerCAmelCase =outputs[0] if not self.training: _lowerCAmelCase =entropy(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase =[] _lowerCAmelCase =[] if labels is not None: if self.num_labels == 1: # We are doing regression _lowerCAmelCase =MSELoss() _lowerCAmelCase =loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _lowerCAmelCase =CrossEntropyLoss() _lowerCAmelCase =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _lowerCAmelCase =[] for highway_exit in outputs[-1]: _lowerCAmelCase =highway_exit[0] if not self.training: highway_logits_all.append(SCREAMING_SNAKE_CASE_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _lowerCAmelCase =MSELoss() _lowerCAmelCase =loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _lowerCAmelCase =CrossEntropyLoss() _lowerCAmelCase =loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(SCREAMING_SNAKE_CASE_ ) if train_highway: _lowerCAmelCase =(sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _lowerCAmelCase =(loss,) + outputs if not self.training: _lowerCAmelCase =outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _lowerCAmelCase =( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
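# A small numeric sketch of the `entropy` helper that drives early exiting
# above: confident (peaked) logits give low entropy, so a per-layer threshold
# on this value decides whether a highway classifier's answer is trusted.
#
#     confident = torch.tensor([[10.0, 0.0, 0.0]])
#     unsure = torch.tensor([[1.0, 1.0, 1.0]])
#     entropy(confident)  # close to 0
#     entropy(unsure)     # close to log(3) ~ 1.0986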
365
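# Illustrative aside (an assumed helper, not part of the dataset row above): the
# encoder code above turns a 2D padding mask into an additive bias with the
# (1.0 - mask) * -10000.0 pattern; a minimal, self-contained version:
import torch


def make_extended_attention_mask(mask: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
    # [batch, seq_len] -> [batch, 1, 1, seq_len], broadcastable over heads and query positions
    extended = mask[:, None, None, :].to(dtype)
    # 0.0 where attention is allowed, -10000.0 where it is masked out (fp16-safe)
    return (1.0 - extended) * -10000.0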
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A = logging.get_logger(__name__) __A = {'vocab_file': 'spiece.model'} __A = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } __A = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) __A = 0 __A = 1 __A = 2 __A = 3 __A = 4 class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = '''left''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =3 _lowerCAmelCase =do_lower_case _lowerCAmelCase =remove_space _lowerCAmelCase =keep_accents _lowerCAmelCase =vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> str: return len(self.sp_model ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[int]: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None return state def __setstate__( self , __UpperCAmelCase ) -> Tuple: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: if self.remove_space: _lowerCAmelCase =""" """.join(inputs.strip().split() ) else: _lowerCAmelCase =inputs _lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase ) _lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] ) if self.do_lower_case: _lowerCAmelCase =outputs.lower() return outputs def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: 
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase ) _lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) _lowerCAmelCase =[] for piece in pieces: if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCAmelCase =cur_pieces[1:] else: _lowerCAmelCase =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__UpperCAmelCase ) else: new_pieces.append(__UpperCAmelCase ) return new_pieces def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: return self.sp_model.PieceToId(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.IdToPiece(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str: _lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase ) _lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 _lowerCAmelCase =[] _lowerCAmelCase =[] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) _lowerCAmelCase =[] sub_texts.append(__UpperCAmelCase ) else: current_sub_text.append(__UpperCAmelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens _lowerCAmelCase ="""""".join(__UpperCAmelCase ) _lowerCAmelCase =( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase ) return clean_text else: return text def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] return ([0] * len(__UpperCAmelCase )) + [1, 1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowerCAmelCase ( self , __UpperCAmelCase , 
__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
341
0
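# Hedged sketch of the special-token layout the XLNet-style tokenizer above
# implements: <sep> and <cls> are appended at the *end* of the sequence,
# unlike BERT. The ids below are placeholders, not real vocabulary ids.
def build_inputs_sketch(ids_a, ids_b=None, sep_id=998, cls_id=999):
    if ids_b is None:
        return ids_a + [sep_id] + [cls_id]                 # A <sep> <cls>
    return ids_a + [sep_id] + ids_b + [sep_id] + [cls_id]  # A <sep> B <sep> <cls>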
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __A = logging.get_logger(__name__) __A = { "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json", } class lowerCamelCase__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCamelCase = '''convnextv2''' def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , __UpperCAmelCase=0.0 , __UpperCAmelCase=2_24 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Union[str, Any]: super().__init__(**__UpperCAmelCase ) _lowerCAmelCase =num_channels _lowerCAmelCase =patch_size _lowerCAmelCase =num_stages _lowerCAmelCase =[96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes _lowerCAmelCase =[3, 3, 9, 3] if depths is None else depths _lowerCAmelCase =hidden_act _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps _lowerCAmelCase =drop_path_rate _lowerCAmelCase =image_size _lowerCAmelCase =["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )] _lowerCAmelCase , _lowerCAmelCase =get_aligned_output_features_output_indices( out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
366
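# Assumed behavior of get_aligned_output_features_output_indices as used by
# the backbone config above; a simplified re-derivation, not the library code.
def align_backbone_outputs(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        out_features = [stage_names[-1]]  # default: expose only the last stage
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    if out_indices is None:
        out_indices = [stage_names.index(name) for name in out_features]
    return out_features, out_indices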
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase ) -> bool: _lowerCAmelCase =str(__UpperCamelCase ) return n == n[::-1] def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> str: _lowerCAmelCase =0 for i in range(1 , __UpperCamelCase ): if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
341
0
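# Quick sanity check for the double-base palindrome sum above (this is
# Project Euler 36): 585 reads the same forwards and backwards in base 10
# and in base 2 (0b1001001001).
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])
# solution(1_000_000) is commonly reported as 872187, though computing it
# takes a few seconds.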
"""simple docstring""" from graphs.minimum_spanning_tree_kruskal import kruskal def _lowerCamelCase() -> List[Any]: _lowerCAmelCase =9 _lowerCAmelCase =[ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _lowerCAmelCase =kruskal(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =[ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(__UpperCamelCase ) == sorted(__UpperCamelCase )
367
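# For reference next to the test above, a minimal union-find Kruskal in the
# same [u, v, weight] edge format; the imported library implementation is
# assumed to behave equivalently.
def kruskal_sketch(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):  # cheapest edges first
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # the edge joins two components, so no cycle
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst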
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''llama''' lowerCamelCase = ['''past_key_values'''] def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: _lowerCAmelCase =vocab_size _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =hidden_size _lowerCAmelCase =intermediate_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads # for backward compatibility if num_key_value_heads is None: _lowerCAmelCase =num_attention_heads _lowerCAmelCase =num_key_value_heads _lowerCAmelCase =hidden_act _lowerCAmelCase =initializer_range _lowerCAmelCase =rms_norm_eps _lowerCAmelCase =pretraining_tp _lowerCAmelCase =use_cache _lowerCAmelCase =rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> str: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'''got {self.rope_scaling}''' ) _lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase ) _lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
341
0
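# Example of the rope_scaling contract enforced by _rope_scaling_validation
# above: a two-field dict with "type" in {"linear", "dynamic"} and a float
# "factor" strictly greater than 1.0.
valid_rope_scaling = {"type": "linear", "factor": 2.0}  # passes validation
bad_rope_scaling = {"type": "ntk", "factor": 0.5}       # would raise ValueError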
"""simple docstring""" import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() __A = 2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model __A = { # fairseq: 'wmt19-ru-en': {'length_penalty': 1.1}, 'wmt19-en-ru': {'length_penalty': 1.15}, 'wmt19-en-de': {'length_penalty': 1.0}, 'wmt19-de-en': {'length_penalty': 1.1}, # allenai: 'wmt16-en-de-dist-12-1': {'length_penalty': 0.6}, 'wmt16-en-de-dist-6-1': {'length_penalty': 0.6}, 'wmt16-en-de-12-1': {'length_penalty': 0.8}, 'wmt19-de-en-6-6-base': {'length_penalty': 0.6}, 'wmt19-de-en-6-6-big': {'length_penalty': 0.6}, } # this remaps the different models to their organization names __A = {} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: __A = 'facebook' for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: __A = 'allenai' def _lowerCamelCase(__UpperCamelCase ) -> Dict: _lowerCAmelCase =dict((re.sub(R"""@@$""" , """""" , snake_case_ ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , snake_case_ ), v) for k, v in d.items() ) _lowerCAmelCase ="""<s> <pad> </s> <unk>""".split() # restore the special tokens for k in keep_keys: del da[F'''{k}</w>'''] _lowerCAmelCase =d[k] # restore return da def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[int]: assert os.path.exists(snake_case_ ) os.makedirs(snake_case_ , exist_ok=snake_case_ ) print(F'''Writing results to {pytorch_dump_folder_path}''' ) # handle various types of models _lowerCAmelCase =basename(snake_case_ ) _lowerCAmelCase =dirname(snake_case_ ) _lowerCAmelCase =fairseq.model_parallel.models.transformer.ModelParallelTransformerModel _lowerCAmelCase =cls.hub_models() _lowerCAmelCase ={"""bpe""": """fastbpe""", """tokenizer""": """moses"""} _lowerCAmelCase =""".""" # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. 
# see: upgrade_state_dict(state_dict) in fairseq_model.py print(F'''using checkpoint {checkpoint_file}''' ) _lowerCAmelCase =hub_utils.from_pretrained( snake_case_ , snake_case_ , snake_case_ , archive_map=snake_case_ , **snake_case_ ) _lowerCAmelCase =vars(chkpt["""args"""]["""model"""] ) _lowerCAmelCase =args["""source_lang"""] _lowerCAmelCase =args["""target_lang"""] _lowerCAmelCase =dirname(snake_case_ ) _lowerCAmelCase =basename(snake_case_ ) # dicts _lowerCAmelCase =os.path.join(snake_case_ , F'''dict.{src_lang}.txt''' ) _lowerCAmelCase =os.path.join(snake_case_ , F'''dict.{tgt_lang}.txt''' ) _lowerCAmelCase =Dictionary.load(snake_case_ ) _lowerCAmelCase =rewrite_dict_keys(src_dict.indices ) _lowerCAmelCase =len(snake_case_ ) _lowerCAmelCase =os.path.join(snake_case_ , """vocab-src.json""" ) print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' ) with open(snake_case_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(snake_case_ , ensure_ascii=snake_case_ , indent=snake_case_ ) ) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab _lowerCAmelCase =True for k in src_vocab.keys(): if not k.islower(): _lowerCAmelCase =False break _lowerCAmelCase =Dictionary.load(snake_case_ ) _lowerCAmelCase =rewrite_dict_keys(tgt_dict.indices ) _lowerCAmelCase =len(snake_case_ ) _lowerCAmelCase =os.path.join(snake_case_ , """vocab-tgt.json""" ) print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' ) with open(snake_case_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(snake_case_ , ensure_ascii=snake_case_ , indent=snake_case_ ) ) # merges_file (bpecodes) _lowerCAmelCase =os.path.join(snake_case_ , VOCAB_FILES_NAMES["""merges_file"""] ) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" _lowerCAmelCase =os.path.join(snake_case_ , snake_case_ ) if os.path.exists(snake_case_ ): break with open(snake_case_ , encoding="""utf-8""" ) as fin: _lowerCAmelCase =fin.read() _lowerCAmelCase =re.sub(R""" \d+$""" , """""" , snake_case_ , 0 , re.M ) # remove frequency number print(F'''Generating {merges_file}''' ) with open(snake_case_ , """w""" , encoding="""utf-8""" ) as fout: fout.write(snake_case_ ) # model config _lowerCAmelCase =os.path.join(snake_case_ , """config.json""" ) # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args['bpe']}''' assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support bpe={args['tokenizer']}''' _lowerCAmelCase ={ """architectures""": ["""FSMTForConditionalGeneration"""], """model_type""": """fsmt""", """activation_dropout""": args["""activation_dropout"""], """activation_function""": """relu""", """attention_dropout""": args["""attention_dropout"""], """d_model""": args["""decoder_embed_dim"""], """dropout""": args["""dropout"""], """init_std""": 0.02, """max_position_embeddings""": args["""max_source_positions"""], """num_hidden_layers""": args["""encoder_layers"""], """src_vocab_size""": src_vocab_size, """tgt_vocab_size""": tgt_vocab_size, """langs""": [src_lang, tgt_lang], """encoder_attention_heads""": args["""encoder_attention_heads"""], """encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""], """encoder_layerdrop""": args["""encoder_layerdrop"""], 
"""encoder_layers""": args["""encoder_layers"""], """decoder_attention_heads""": args["""decoder_attention_heads"""], """decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""], """decoder_layerdrop""": args["""decoder_layerdrop"""], """decoder_layers""": args["""decoder_layers"""], """bos_token_id""": 0, """pad_token_id""": 1, """eos_token_id""": 2, """is_encoder_decoder""": True, """scale_embedding""": not args["""no_scale_embedding"""], """tie_word_embeddings""": args["""share_all_embeddings"""], } # good hparam defaults to start with _lowerCAmelCase =5 _lowerCAmelCase =False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: _lowerCAmelCase =best_score_hparams[model_dir]["""length_penalty"""] else: _lowerCAmelCase =1.0 print(F'''Generating {fsmt_model_config_file}''' ) with open(snake_case_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(snake_case_ , ensure_ascii=snake_case_ , indent=snake_case_ ) ) # tokenizer config _lowerCAmelCase =os.path.join(snake_case_ , snake_case_ ) _lowerCAmelCase ={ """langs""": [src_lang, tgt_lang], """model_max_length""": 1024, """do_lower_case""": do_lower_case, } print(F'''Generating {fsmt_tokenizer_config_file}''' ) with open(snake_case_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(snake_case_ , ensure_ascii=snake_case_ , indent=snake_case_ ) ) # model _lowerCAmelCase =chkpt["""models"""][0] _lowerCAmelCase =model.state_dict() # rename keys to start with 'model.' _lowerCAmelCase =OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() ) # remove unneeded keys _lowerCAmelCase =[ """model.model""", """model.encoder.version""", """model.decoder.version""", """model.encoder_embed_tokens.weight""", """model.decoder_embed_tokens.weight""", """model.encoder.embed_positions._float_tensor""", """model.decoder.embed_positions._float_tensor""", ] for k in ignore_keys: model_state_dict.pop(snake_case_ , snake_case_ ) _lowerCAmelCase =FSMTConfig.from_pretrained(snake_case_ ) _lowerCAmelCase =FSMTForConditionalGeneration(snake_case_ ) # check that it loads ok model_new.load_state_dict(snake_case_ , strict=snake_case_ ) # save _lowerCAmelCase =os.path.join(snake_case_ , snake_case_ ) print(F'''Generating {pytorch_weights_dump_path}''' ) torch.save(snake_case_ , snake_case_ ) print("""Conversion is done!""" ) print("""\nLast step is to upload the files to s3""" ) print(F'''cd {data_root}''' ) print(F'''transformers-cli upload {model_dir}''' ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--fsmt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __A = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
368
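# A hedged re-derivation of rewrite_dict_keys from the conversion script
# above (it omits the special-token restore step the real function performs):
# fairseq marks BPE continuation pieces with a trailing "@@", while the FSMT
# vocab marks word-final pieces with "</w>".
import re


def rewrite_dict_keys_sketch(d):
    return {
        (re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v
        for k, v in d.items()
    }


assert rewrite_dict_keys_sketch({"hel@@": 5, "lo": 6}) == {"hel": 5, "lo</w>": 6}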
"""simple docstring""" import warnings from .generation import TFGenerationMixin class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' # warning at import time warnings.warn( '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will ''' '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
341
0
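# The shim above exists only for backward compatibility; new code should use
# the package-root import instead:
from transformers import TFGenerationMixin  # preferred import path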
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int __A = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class lowerCamelCase__ ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase = None def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , ) -> Optional[int]: import pyspark def generate_fn(): _lowerCAmelCase =df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: _lowerCAmelCase =df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" ) _lowerCAmelCase =partition_df.collect() _lowerCAmelCase =0 for row in rows: yield F'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class lowerCamelCase__ ( _BaseExamplesIterable ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , ) -> List[str]: _lowerCAmelCase =df _lowerCAmelCase =partition_order or range(self.df.rdd.getNumPartitions() ) _lowerCAmelCase =_generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ) -> Tuple: yield from self.generate_examples_fn() def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase =list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(_UpperCAmelCase ) return SparkExamplesIterable(self.df , partition_order=_UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: _lowerCAmelCase =self.split_shard_indices_by_worker(_UpperCAmelCase , _UpperCAmelCase ) return SparkExamplesIterable(self.df , partition_order=_UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> List[Any]: return len(self.partition_order ) class lowerCamelCase__ ( datasets.DatasetBuilder ): '''simple docstring''' lowerCamelCase = SparkConfig def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Union[str, Any]: import pyspark _lowerCAmelCase =pyspark.sql.SparkSession.builder.getOrCreate() _lowerCAmelCase =df _lowerCAmelCase =working_dir super().__init__( cache_dir=_UpperCAmelCase , config_name=str(self.df.semanticHash() ) , **_UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> List[str]: # Returns the path of the created file. def create_cache_and_write_probe(__UpperCAmelCase ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=_UpperCAmelCase ) _lowerCAmelCase =os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(_UpperCAmelCase , """a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. 
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: _lowerCAmelCase =( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_UpperCAmelCase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def _lowerCAmelCase ( self ) -> Optional[Any]: return datasets.DatasetInfo(features=self.config.features ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Any: return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: import pyspark def get_arrow_batch_size(__UpperCAmelCase ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) _lowerCAmelCase =self.df.count() _lowerCAmelCase =df_num_rows if df_num_rows <= 1_00 else 1_00 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _lowerCAmelCase =( self.df.limit(_UpperCAmelCase ) .repartition(1 ) .mapInArrow(_UpperCAmelCase , """batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) _lowerCAmelCase =approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _lowerCAmelCase =min(_UpperCAmelCase , int(approx_total_size / max_shard_size ) ) _lowerCAmelCase =self.df.repartition(_UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Tuple: import pyspark _lowerCAmelCase =ParquetWriter if file_format == 'parquet' else ArrowWriter _lowerCAmelCase =os.path.join(self._working_dir , os.path.basename(_UpperCAmelCase ) ) if self._working_dir else fpath _lowerCAmelCase =file_format == 'parquet' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. _lowerCAmelCase =self.config.features _lowerCAmelCase =self._writer_batch_size _lowerCAmelCase =self._fs.storage_options def write_arrow(__UpperCAmelCase ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _lowerCAmelCase =pyspark.TaskContext().taskAttemptId() _lowerCAmelCase =next(_UpperCAmelCase , _UpperCAmelCase ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) _lowerCAmelCase =0 _lowerCAmelCase =writer_class( features=_UpperCAmelCase , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=_UpperCAmelCase , storage_options=_UpperCAmelCase , embed_local_files=_UpperCAmelCase , ) _lowerCAmelCase =pa.Table.from_batches([first_batch] ) writer.write_table(_UpperCAmelCase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _lowerCAmelCase =writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) shard_id += 1 _lowerCAmelCase =writer_class( features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=_UpperCAmelCase , storage_options=_UpperCAmelCase , embed_local_files=_UpperCAmelCase , ) _lowerCAmelCase =pa.Table.from_batches([batch] ) writer.write_table(_UpperCAmelCase ) if writer._num_bytes > 0: _lowerCAmelCase =writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(_UpperCAmelCase ) ): _lowerCAmelCase =os.path.join(os.path.dirname(_UpperCAmelCase ) , os.path.basename(_UpperCAmelCase ) ) shutil.move(_UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase =( self.df.mapInArrow(_UpperCAmelCase , """task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = "arrow" , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> List[Any]: self._validate_cache_dir() _lowerCAmelCase =convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(_UpperCAmelCase ) _lowerCAmelCase =not is_remote_filesystem(self._fs ) _lowerCAmelCase =os.path.join if is_local else posixpath.join _lowerCAmelCase ='-TTTTT-SSSSS-of-NNNNN' _lowerCAmelCase =f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' _lowerCAmelCase =path_join(self._output_dir , _UpperCAmelCase ) _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =[] _lowerCAmelCase =[] for task_id, content in self._prepare_split_single(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): ( _lowerCAmelCase ) =content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(_UpperCAmelCase ) _lowerCAmelCase =total_num_examples _lowerCAmelCase =total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: _lowerCAmelCase =all_shard_lengths # Define fs outside of _rename_shard so that we don't 
reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _lowerCAmelCase =self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): rename( _UpperCAmelCase , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , ) _lowerCAmelCase =[] _lowerCAmelCase =0 for i in range(len(_UpperCAmelCase ) ): _lowerCAmelCase =task_id_and_num_shards[i] for shard_id in range(_UpperCAmelCase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(_UpperCAmelCase , len(_UpperCAmelCase ) ).map(lambda __UpperCAmelCase : _rename_shard(*_UpperCAmelCase ) ).collect() else: # don't use any pattern _lowerCAmelCase =0 _lowerCAmelCase =task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(_UpperCAmelCase , """""" ) , ) def _lowerCAmelCase ( self , __UpperCAmelCase , ) -> Any: return SparkExamplesIterable(self.df )
369
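# Back-of-envelope version of the repartitioning logic above: estimate bytes
# per row from a small sample, then pick a partition count that keeps each
# shard under max_shard_size. A simplified sketch, not the builder's API.
def choose_num_partitions(num_rows, approx_bytes_per_row, max_shard_size):
    approx_total_size = num_rows * approx_bytes_per_row
    if approx_total_size <= max_shard_size:
        return 1
    # never create more partitions than rows, and always at least one
    return max(1, min(num_rows, int(approx_total_size / max_shard_size)))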
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =decoder_seq_length # For common tests _lowerCAmelCase =self.decoder_seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_attention_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =d_model _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_ffn_dim _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =eos_token_id _lowerCAmelCase =bos_token_id _lowerCAmelCase =pad_token_id _lowerCAmelCase =decoder_start_token_id _lowerCAmelCase =use_cache _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =None _lowerCAmelCase =decoder_seq_length _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =None if self.use_attention_mask: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _lowerCAmelCase =None if self.use_labels: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]: _lowerCAmelCase =True _lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval() _lowerCAmelCase =input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _lowerCAmelCase =outputs["""past_key_values"""] # create hypothetical next token and extent to next_input_ids _lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # 
append to next input_ids and _lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""] _lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""] # select random slice _lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else () lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {} lowerCamelCase = True lowerCamelCase = False def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> List[str]: pass def _lowerCAmelCase ( self ) -> List[Any]: pass def _lowerCAmelCase ( self ) -> Any: pass def _lowerCAmelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: return @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def _lowerCAmelCase ( self ) -> str: pass
341
0
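# ids_tensor, used throughout the tester above, is essentially a uniform
# random LongTensor over the vocabulary; a minimal stand-in (the real helper
# in transformers' test utilities accepts extra arguments):
import torch


def ids_tensor_sketch(shape, vocab_size):
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)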
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> str: _lowerCAmelCase =int(__SCREAMING_SNAKE_CASE ) if decimal in (0, 1): # Exit cases for the recursion return str(__SCREAMING_SNAKE_CASE ) _lowerCAmelCase =divmod(__SCREAMING_SNAKE_CASE , 2 ) return binary_recursive(__SCREAMING_SNAKE_CASE ) + str(__SCREAMING_SNAKE_CASE ) def _lowerCamelCase(__UpperCamelCase ) -> str: _lowerCAmelCase =str(__SCREAMING_SNAKE_CASE ).strip() if not number: raise ValueError("""No input value was provided""" ) _lowerCAmelCase ="-" if number.startswith("""-""" ) else "" _lowerCAmelCase =number.lstrip("""-""" ) if not number.isnumeric(): raise ValueError("""Input value is not an integer""" ) return F'''{negative}0b{binary_recursive(int(__SCREAMING_SNAKE_CASE ) )}''' if __name__ == "__main__": from doctest import testmod testmod()
370
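# Usage of the converter above (names per the cleaned-up version): the
# recursive helper produces the magnitude, main() restores sign and prefix.
assert binary_recursive(13) == "1101"
assert main("-13") == "-0b1101"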
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase = JukeboxTokenizer lowerCamelCase = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def _lowerCAmelCase ( self ) -> str: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 
45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def _lowerCAmelCase ( self ) -> Any: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 
35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
341
0
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Dict: _lowerCAmelCase =args.log_outputs _lowerCAmelCase ="""_""".join(args.dataset.split("""/""" ) + [args.config, args.split] ) # load metric _lowerCAmelCase =load_metric("""wer""" ) _lowerCAmelCase =load_metric("""cer""" ) # compute metrics _lowerCAmelCase =wer.compute(references=result["""target"""] , predictions=result["""prediction"""] ) _lowerCAmelCase =cer.compute(references=result["""target"""] , predictions=result["""prediction"""] ) # print & log results _lowerCAmelCase =F'''WER: {wer_result}\nCER: {cer_result}''' print(__lowerCAmelCase ) with open(F'''{dataset_id}_eval_results.txt''' , """w""" ) as f: f.write(__lowerCAmelCase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: _lowerCAmelCase =F'''log_{dataset_id}_predictions.txt''' _lowerCAmelCase =F'''log_{dataset_id}_targets.txt''' with open(__lowerCAmelCase , """w""" ) as p, open(__lowerCAmelCase , """w""" ) as t: # mapping function to write output def write_to_file(__UpperCamelCase , __UpperCamelCase ): p.write(F'''{i}''' + """\n""" ) p.write(batch["""prediction"""] + """\n""" ) t.write(F'''{i}''' + """\n""" ) t.write(batch["""target"""] + """\n""" ) result.map(__lowerCAmelCase , with_indices=__lowerCAmelCase ) def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]: _lowerCAmelCase ="""[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training _lowerCAmelCase =re.sub(__lowerCAmelCase , """""" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! _lowerCAmelCase =["""\n\n""", """\n""", """ """, """ """] for t in token_sequences_to_ignore: _lowerCAmelCase =""" """.join(text.split(__lowerCAmelCase ) ) return text def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]: # load dataset _lowerCAmelCase =load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__lowerCAmelCase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor _lowerCAmelCase =AutoFeatureExtractor.from_pretrained(args.model_id ) _lowerCAmelCase =feature_extractor.sampling_rate # resample audio _lowerCAmelCase =dataset.cast_column("""audio""" , Audio(sampling_rate=__lowerCAmelCase ) ) # load eval pipeline if args.device is None: _lowerCAmelCase =0 if torch.cuda.is_available() else -1 _lowerCAmelCase =pipeline("""automatic-speech-recognition""" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(__UpperCamelCase ): _lowerCAmelCase =asr( batch["""audio"""]["""array"""] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) _lowerCAmelCase =prediction["""text"""] _lowerCAmelCase =normalize_text(batch["""sentence"""] ) return batch # run inference on all examples _lowerCAmelCase =dataset.map(__lowerCAmelCase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--model_id', type=str, required=True, help='Model identifier. 
Should be loadable with 🤗 Transformers' ) parser.add_argument( '--dataset', type=str, required=True, help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets', ) parser.add_argument( '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice' ) parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`') parser.add_argument( '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.' ) parser.add_argument( '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.' ) parser.add_argument( '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.' ) parser.add_argument( '--device', type=int, default=None, help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.', ) __A = parser.parse_args() main(args)
371
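# The target-text normalization above, condensed into one helper: strip the
# punctuation ignored during training, lowercase, and collapse whitespace.
# A simplified equivalent, assuming the same ignored-character set.
import re


def normalize_text_sketch(text):
    text = re.sub(r'[,?.!\-\;\:"“%‘”�—’…–]', "", text.lower())
    return " ".join(text.split())  # replaces the "\n\n"/"\n"/"  " loop in one pass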
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = '▁' __A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} __A = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } __A = {'vinai/bartpho-syllable': 1024} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =vocab_file _lowerCAmelCase =monolingual_vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _lowerCAmelCase ={} _lowerCAmelCase =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =cnt cnt += 1 with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _lowerCAmelCase =line.strip().split()[0] _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) _lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Dict: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None _lowerCAmelCase =self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] _lowerCAmelCase =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 
False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCAmelCase ( self ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: return self.fairseq_ids_to_tokens[index] def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__UpperCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
341
0
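The style_context cell above is a BARTpho-style tokenizer that layers a fairseq `dict.txt` vocabulary on top of a SentencePiece model, assigning the special tokens the first ids and then one id per dict line. Below is a minimal, dependency-free sketch of that vocabulary-merging idea; the helper name and the toy dict lines are illustrative, not part of the tokenizer's API.

# Illustrative sketch only: special tokens claim the first ids, then each line of
# a fairseq-style dict file ("<token> <count>") contributes one token, in order.
def build_fairseq_vocab(special_tokens, dict_lines):
    token_to_id = {}
    for token in special_tokens:  # keep special-token order for stable ids
        if token not in token_to_id:
            token_to_id[token] = len(token_to_id)
    for line in dict_lines:
        token = line.strip().split()[0]
        if token not in token_to_id:
            token_to_id[token] = len(token_to_id)
    return token_to_id


special = ["<s>", "<pad>", "</s>", "<unk>"]
dict_lines = ["xin 120", "chao 87", "viet 55"]  # hypothetical dict.txt contents
vocab = build_fairseq_vocab(special, dict_lines)
assert vocab["xin"] == 4  # first non-special token follows the 4 special ids
id_to_token = {i: t for t, i in vocab.items()}
print(id_to_token[5])  # -> "chao"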
"""simple docstring""" __A = 'Input must be a string of 8 numbers plus letter' __A = 'TRWAGMYFPDXBNJZSQVHLCKE' def _lowerCamelCase(__UpperCamelCase ) -> bool: if not isinstance(__UpperCamelCase , __UpperCamelCase ): _lowerCAmelCase =F'''Expected string as input, found {type(__UpperCamelCase ).__name__}''' raise TypeError(__UpperCamelCase ) _lowerCAmelCase =spanish_id.replace("""-""" , """""" ).upper() if len(__UpperCamelCase ) != 9: raise ValueError(__UpperCamelCase ) try: _lowerCAmelCase =int(spanish_id_clean[0:8] ) _lowerCAmelCase =spanish_id_clean[8] except ValueError as ex: raise ValueError(__UpperCamelCase ) from ex if letter.isdigit(): raise ValueError(__UpperCamelCase ) return letter == LOOKUP_LETTERS[number % 23] if __name__ == "__main__": import doctest doctest.testmod()
350
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =1 _lowerCAmelCase =3 _lowerCAmelCase =(32, 32) _lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase ) return image @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , ) return model @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def _lowerCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _lowerCAmelCase =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , ) return CLIPTextModel(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , 
guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0] _lowerCAmelCase =image[0, -3:, -3:, -1] _lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1] _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 _lowerCAmelCase =unet.half() _lowerCAmelCase =text_encoder.half() # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase 
=sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 1e-3 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat_fp16.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowerCAmelCase ( self ) -> Optional[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , 
output_type="""np""" , ) _lowerCAmelCase =torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
341
0
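The code cell of this row is a Spanish DNI validator built on the mod-23 checksum table. A standalone sketch of that rule, with the commonly cited "12345678Z" test value (not real data):

# Standalone sketch of the mod-23 DNI checksum used in the validator above.
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"

def dni_letter_is_valid(spanish_id: str) -> bool:
    cleaned = spanish_id.replace("-", "").upper()
    number, letter = int(cleaned[:8]), cleaned[8]
    return letter == LOOKUP_LETTERS[number % 23]

print(dni_letter_is_valid("12345678Z"))  # True: 12345678 % 23 == 14 -> "Z"
print(dni_letter_is_valid("12345678B"))  # False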
import math
from collections.abc import Callable


def _lowerCamelCase(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase) -> float:
    _lowerCAmelCase = xa
    _lowerCAmelCase = xa
    while True:
        if x_n == x_na or function(__UpperCamelCase) == function(__UpperCamelCase):
            raise ZeroDivisionError("""float division by zero, could not find root""")
        _lowerCAmelCase = x_na - (
            function(__UpperCamelCase) / ((function(__UpperCamelCase) - function(__UpperCamelCase)) / (x_na - x_n))
        )
        if abs(x_na - x_na) < 10**-5:
            return x_na
        _lowerCAmelCase = x_na
        _lowerCAmelCase = x_na


def _lowerCamelCase(__UpperCamelCase) -> float:
    return math.pow(__UpperCamelCase, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
351
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''cvt''' def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) _lowerCAmelCase =num_channels _lowerCAmelCase =patch_sizes _lowerCAmelCase =patch_stride _lowerCAmelCase =patch_padding _lowerCAmelCase =embed_dim _lowerCAmelCase =num_heads _lowerCAmelCase =depth _lowerCAmelCase =mlp_ratio _lowerCAmelCase =attention_drop_rate _lowerCAmelCase =drop_rate _lowerCAmelCase =drop_path_rate _lowerCAmelCase =qkv_bias _lowerCAmelCase =cls_token _lowerCAmelCase =qkv_projection_method _lowerCAmelCase =kernel_qkv _lowerCAmelCase =padding_kv _lowerCAmelCase =stride_kv _lowerCAmelCase =padding_q _lowerCAmelCase =stride_q _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps
341
0
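The code cell of this row is the secant root-finding method with a hard-coded 1e-5 tolerance. A readably named sketch of the same iteration on its test function f(x) = x^3 - 2x - 5, whose real root is about 2.0945515; the tolerance and max_iter values here are illustrative choices, not taken from the cell:

# Readable sketch of the secant iteration above (names are illustrative).
def secant(f, x0, x1, tol=1e-5, max_iter=50):
    for _ in range(max_iter):
        f0, f1 = f(x0), f(x1)
        if f1 == f0:
            raise ZeroDivisionError("flat secant, could not find root")
        x2 = x1 - f1 * (x1 - x0) / (f1 - f0)
        if abs(x2 - x1) < tol:
            return x2
        x0, x1 = x1, x2
    raise RuntimeError("did not converge")

root = secant(lambda x: x**3 - 2 * x - 5, 3.0, 3.5)
print(round(root, 6))  # ~2.094551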
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase = 1000 ) -> int: _lowerCAmelCase =2**power _lowerCAmelCase =0 while n: _lowerCAmelCase , _lowerCAmelCase =r + n % 10, n // 10 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
352
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = ['''image_processor''', '''tokenizer'''] lowerCamelCase = '''CLIPImageProcessor''' lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''') def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase =None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __UpperCAmelCase , ) _lowerCAmelCase =kwargs.pop("""feature_extractor""" ) _lowerCAmelCase =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if images is not None: _lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: _lowerCAmelCase =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =self.tokenizer.model_input_names _lowerCAmelCase =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
341
0
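The code cell of this row computes the digit sum of 2**power (Project Euler problem 16). A small cross-check sketch that mirrors the cell's arithmetic loop against the string-based one-liner:

# Cross-check sketch for the digit-sum loop above.
def digit_sum(power: int) -> int:
    n, total = 2**power, 0
    while n:
        total, n = total + n % 10, n // 10
    return total

assert digit_sum(15) == 26            # 2**15 = 32768 -> 3 + 2 + 7 + 6 + 8
assert digit_sum(1000) == sum(int(d) for d in str(2**1000))
print(digit_sum(1000))  # 1366, the well-known Project Euler #16 answer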
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A = 16 __A = 32 def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase = 16 ) -> Optional[Any]: _lowerCAmelCase =AutoTokenizer.from_pretrained("""bert-base-cased""" ) _lowerCAmelCase =load_dataset("""glue""" , """mrpc""" ) def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) _lowerCAmelCase =tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCamelCase , max_length=__UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _lowerCAmelCase =datasets.map( __UpperCamelCase , batched=__UpperCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCAmelCase =tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. _lowerCAmelCase =128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _lowerCAmelCase =16 elif accelerator.mixed_precision != "no": _lowerCAmelCase =8 else: _lowerCAmelCase =None return tokenizer.pad( __UpperCamelCase , padding="""longest""" , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. 
_lowerCAmelCase =DataLoader( tokenized_datasets["""train"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase ) _lowerCAmelCase =DataLoader( tokenized_datasets["""validation"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __A = mocked_dataloaders # noqa: F811 def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> List[str]: # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __UpperCamelCase ) == "1": _lowerCAmelCase =2 # New Code # _lowerCAmelCase =int(args.gradient_accumulation_steps ) # Initialize accelerator _lowerCAmelCase =Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__UpperCamelCase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCAmelCase =config["""lr"""] _lowerCAmelCase =int(config["""num_epochs"""] ) _lowerCAmelCase =int(config["""seed"""] ) _lowerCAmelCase =int(config["""batch_size"""] ) _lowerCAmelCase =evaluate.load("""glue""" , """mrpc""" ) set_seed(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =get_dataloaders(__UpperCamelCase , __UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCAmelCase =AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowerCAmelCase =model.to(accelerator.device ) # Instantiate optimizer _lowerCAmelCase =AdamW(params=model.parameters() , lr=__UpperCamelCase ) # Instantiate scheduler _lowerCAmelCase =get_linear_schedule_with_warmup( optimizer=__UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =accelerator.prepare( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Now we train the model for epoch in range(__UpperCamelCase ): model.train() for step, batch in enumerate(__UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(__UpperCamelCase ): _lowerCAmelCase =model(**__UpperCamelCase ) _lowerCAmelCase =output.loss accelerator.backward(__UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _lowerCAmelCase =model(**__UpperCamelCase ) _lowerCAmelCase =outputs.logits.argmax(dim=-1 ) _lowerCAmelCase , _lowerCAmelCase =accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__UpperCamelCase , references=__UpperCamelCase , ) _lowerCAmelCase =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , __UpperCamelCase ) def _lowerCamelCase() -> Dict: _lowerCAmelCase =argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__UpperCamelCase , default=__UpperCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=__UpperCamelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) _lowerCAmelCase =parser.parse_args() _lowerCAmelCase ={"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": main()
353
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['PerceiverFeatureExtractor'] __A = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
0
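The code cell of this row performs gradient accumulation through `accelerator.accumulate(...)`. The same bookkeeping can be written in plain PyTorch; below is a framework-free sketch with a toy linear model and random data (all names and sizes here are illustrative, not taken from the script above):

import torch
from torch import nn

# Plain-PyTorch sketch of gradient accumulation: scale each micro-batch loss by
# the number of accumulation steps and only call optimizer.step() every k batches.
accumulation_steps = 4
model = nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = nn.MSELoss()

batches = [(torch.randn(4, 8), torch.randn(4, 1)) for _ in range(8)]

optimizer.zero_grad()
for step, (x, y) in enumerate(batches):
    loss = loss_fn(model(x), y) / accumulation_steps  # keep gradient scale comparable
    loss.backward()  # gradients add up across micro-batches
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()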
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class lowerCamelCase__(__magic_name__, __magic_name__, __magic_name__, unittest.TestCase):
    '''simple docstring'''

    lowerCamelCase = StableUnCLIPPipeline
    lowerCamelCase = TEXT_TO_IMAGE_PARAMS
    lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
    lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    lowerCamelCase = False

    def _lowerCAmelCase(self) -> List[Any]:
        _lowerCAmelCase = 32
        _lowerCAmelCase = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        _lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")

        torch.manual_seed(0)
        _lowerCAmelCase = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=__UpperCAmelCase,
                projection_dim=__UpperCAmelCase,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=10_00,
            )
        )

        torch.manual_seed(0)
        _lowerCAmelCase = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=__UpperCAmelCase,
            num_layers=1,
        )

        torch.manual_seed(0)
        _lowerCAmelCase = DDPMScheduler(
            variance_type="""fixed_small_log""",
            prediction_type="""sample""",
            num_train_timesteps=10_00,
            clip_sample=__UpperCAmelCase,
            clip_sample_range=5.0,
            beta_schedule="""squaredcos_cap_v2""",
        )

        # regular denoising components

        torch.manual_seed(0)
        _lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__UpperCAmelCase)
        _lowerCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""")

        torch.manual_seed(0)
        _lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")

        torch.manual_seed(0)
        _lowerCAmelCase = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=__UpperCAmelCase,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=10_00,
            )
        )

        torch.manual_seed(0)
        _lowerCAmelCase = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D"""),
            up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D"""),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="""projection""",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=__UpperCAmelCase,
            layers_per_block=1,
            upcast_attention=__UpperCAmelCase,
            use_linear_projection=__UpperCAmelCase,
        )

        torch.manual_seed(0)
        _lowerCAmelCase = DDIMScheduler(
            beta_schedule="""scaled_linear""",
            beta_start=0.0_0_0_8_5,
            beta_end=0.0_1_2,
            prediction_type="""v_prediction""",
            set_alpha_to_one=__UpperCAmelCase,
            steps_offset=1,
        )

        torch.manual_seed(0)
        _lowerCAmelCase = AutoencoderKL()

        _lowerCAmelCase = {
            # prior components
            """prior_tokenizer""": prior_tokenizer,
            """prior_text_encoder""": prior_text_encoder,
            """prior""": prior,
            """prior_scheduler""": prior_scheduler,
            # image noising components
            """image_normalizer""": image_normalizer,
            """image_noising_scheduler""": image_noising_scheduler,
            # regular denoising components
            """tokenizer""": tokenizer,
            """text_encoder""": text_encoder,
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
        }

        return components

    def _lowerCAmelCase(self, __UpperCAmelCase, __UpperCAmelCase=0) -> Dict:
        if str(__UpperCAmelCase).startswith("""mps"""):
            _lowerCAmelCase = torch.manual_seed(__UpperCAmelCase)
        else:
            _lowerCAmelCase = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
        _lowerCAmelCase = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """prior_num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    def _lowerCAmelCase(self) -> str:
        _lowerCAmelCase = torch_device == """cpu"""
        self._test_attention_slicing_forward_pass(test_max_difference=__UpperCAmelCase)

    def _lowerCAmelCase(self) -> int:
        _lowerCAmelCase = torch_device in ["""cpu""", """mps"""]
        self._test_inference_batch_single_identical(test_max_difference=__UpperCAmelCase)


@slow
@require_torch_gpu
class lowerCamelCase__(unittest.TestCase):
    '''simple docstring'''

    def _lowerCAmelCase(self) -> List[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowerCAmelCase(self) -> List[str]:
        _lowerCAmelCase = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"""
        )

        _lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""", torch_dtype=torch.floataa)
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _lowerCAmelCase = torch.Generator(device="""cpu""").manual_seed(0)
        _lowerCAmelCase = pipe("""anime turle""", generator=__UpperCAmelCase, output_type="""np""")

        _lowerCAmelCase = output.images[0]

        assert image.shape == (7_68, 7_68, 3)

        assert_mean_pixel_difference(__UpperCAmelCase, __UpperCAmelCase)

    def _lowerCAmelCase(self) -> Tuple:
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        _lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""", torch_dtype=torch.floataa)
        _lowerCAmelCase = pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _lowerCAmelCase = pipe(
            """anime turtle""",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="""np""",
        )

        _lowerCAmelCase = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
354
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Swinv2ForImageClassification', 'Swinv2ForMaskedImageModeling', 'Swinv2Model', 'Swinv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
0
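The style_context cell of this row wires its public names through transformers' `_LazyModule` so that heavy submodules are only imported on first attribute access. Below is a much-simplified sketch of that idea; it is an illustration of lazy attribute resolution, not the actual `_LazyModule` implementation, and the demo mapping to the stdlib `json` and `math` modules is purely hypothetical:

import importlib
import types

class SimpleLazyModule(types.ModuleType):
    # Illustrative only: resolve exported names to their defining module on
    # first access instead of importing everything up front.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the module that actually defines it
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr in self._name_to_module:
            value = getattr(importlib.import_module(self._name_to_module[attr]), attr)
            setattr(self, attr, value)  # cache so later lookups skip __getattr__
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")

lazy = SimpleLazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.dumps({"ok": True}), lazy.sqrt(9))  # json/math imported only when touched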
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml __A = logging.get_logger(__name__) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Dict: def run_func(__UpperCamelCase ): @wraps(__UpperCamelCase ) def run_in_eager_mode(*__UpperCamelCase , **__UpperCamelCase ): return func(*__UpperCamelCase , **__UpperCamelCase ) @wraps(__UpperCamelCase ) @tf.function(experimental_compile=__UpperCamelCase ) def run_in_graph_mode(*__UpperCamelCase , **__UpperCamelCase ): return func(*__UpperCamelCase , **__UpperCamelCase ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> ["tf.Tensor"]: _lowerCAmelCase =random.Random() _lowerCAmelCase =[rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(__UpperCamelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = 42 lowerCamelCase = 42 lowerCamelCase = '''TensorFlow''' @property def _lowerCAmelCase ( self ) -> int: return tf.__version__ def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float: # initialize GPU on separate process _lowerCAmelCase =self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase =self._prepare_inference_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return self._measure_speed(_inference ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float: _lowerCAmelCase =self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase =self._prepare_train_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return self._measure_speed(_train ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> [Memory, Optional[MemorySummary]]: # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCAmelCase ) _lowerCAmelCase =self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase =self._prepare_inference_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return self._measure_memory(_inference ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> [Memory, Optional[MemorySummary]]: if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] 
, __UpperCAmelCase ) _lowerCAmelCase =self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase =self._prepare_train_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return self._measure_memory(_train ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Callable[[], None]: _lowerCAmelCase =self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase =( hasattr(__UpperCAmelCase , """architectures""" ) and isinstance(config.architectures , __UpperCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase ="""TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase =__import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase =getattr(__UpperCAmelCase , __UpperCAmelCase ) _lowerCAmelCase =model_cls(__UpperCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase =TF_MODEL_MAPPING[config.__class__](__UpperCAmelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase =config.vocab_size if hasattr(__UpperCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase =random_input_ids(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , training=__UpperCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(__UpperCAmelCase , training=__UpperCAmelCase ) _lowerCAmelCase =encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Callable[[], None]: _lowerCAmelCase =self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase =( hasattr(__UpperCAmelCase , """architectures""" ) and isinstance(config.architectures , __UpperCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase ="""TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase =__import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase =getattr(__UpperCAmelCase , __UpperCAmelCase ) _lowerCAmelCase =model_cls(__UpperCAmelCase ) except ImportError: raise ImportError( f'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase =TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCAmelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase =config.vocab_size if hasattr(__UpperCAmelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase =random_input_ids(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _lowerCAmelCase =model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )[0] _lowerCAmelCase =tf.gradients(__UpperCAmelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _lowerCAmelCase =model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )[0] _lowerCAmelCase =tf.gradients(__UpperCAmelCase , model.trainable_variables ) return gradients _lowerCAmelCase =encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def _lowerCAmelCase ( self , __UpperCAmelCase ) -> float: with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(__UpperCAmelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _lowerCAmelCase =timeit.repeat( __UpperCAmelCase , repeat=self.args.repeat , number=10 , ) return min(__UpperCAmelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> [Memory, MemorySummary]: logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _lowerCAmelCase =start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _lowerCAmelCase ="""N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _lowerCAmelCase =nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _lowerCAmelCase =nvml.nvmlDeviceGetMemoryInfo(__UpperCAmelCase ) _lowerCAmelCase =meminfo.used _lowerCAmelCase =Memory(__UpperCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _lowerCAmelCase =None else: _lowerCAmelCase =measure_peak_memory_cpu(__UpperCAmelCase ) _lowerCAmelCase =Memory(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _lowerCAmelCase =stop_memory_tracing(__UpperCAmelCase ) if memory is None: _lowerCAmelCase =summary.total else: _lowerCAmelCase =None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
355
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple: if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" ) _lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" ) _lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" ) _lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" ) _lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item _lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" ) _lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" ) _lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]: assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _lowerCAmelCase =old_checkpoint[path] _lowerCAmelCase =old_tensor.shape[0] // 3 _lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1) _lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3 _lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 ) _lowerCAmelCase =query.reshape(__UpperCamelCase ) _lowerCAmelCase =key.reshape(__UpperCamelCase ) _lowerCAmelCase =value.reshape(__UpperCamelCase ) for path in paths: _lowerCAmelCase =path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0] else: _lowerCAmelCase =old_checkpoint[path["""old"""]] def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase ={} _lowerCAmelCase =checkpoint["""time_embed.0.weight"""] _lowerCAmelCase =checkpoint["""time_embed.0.bias"""] _lowerCAmelCase =checkpoint["""time_embed.2.weight"""] _lowerCAmelCase =checkpoint["""time_embed.2.bias"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""] _lowerCAmelCase =checkpoint["""out.0.weight"""] _lowerCAmelCase =checkpoint["""out.0.bias"""] _lowerCAmelCase =checkpoint["""out.2.weight"""] _lowerCAmelCase =checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the middle blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the output blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } for i in range(1 , __UpperCamelCase ): _lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: _lowerCAmelCase =checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] _lowerCAmelCase =checkpoint[ 
F'''input_blocks.{i}.0.op.bias''' ] continue _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} _lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase ) if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''input_blocks.{i}.1''', """new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''input_blocks.{i}.1.qkv.bias''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , ) _lowerCAmelCase =middle_blocks[0] _lowerCAmelCase =middle_blocks[1] _lowerCAmelCase =middle_blocks[2] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase ) for i in range(__UpperCamelCase ): _lowerCAmelCase =i // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =i % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]] _lowerCAmelCase ={} for layer in output_block_layers: _lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__UpperCamelCase ) else: _lowerCAmelCase =[layer_name] if len(__UpperCamelCase ) > 1: _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(__UpperCamelCase ) == 2: _lowerCAmelCase =[] if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''output_blocks.{i}.1''', """new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''output_blocks.{i}.1.qkv.bias''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , ) else: _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] ) _lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] ) _lowerCAmelCase =checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') __A = parser.parse_args() __A = torch.load(args.checkpoint_path) with open(args.config_file) as f: __A = json.loads(f.read()) __A = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __A = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) __A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) __A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
341
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Swinv2ForImageClassification', 'Swinv2ForMaskedImageModeling', 'Swinv2Model', 'Swinv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
356
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =0 _lowerCAmelCase =len(__UpperCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , __UpperCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: if len(__UpperCamelCase ) <= 1: return arr, 0 _lowerCAmelCase =len(__UpperCamelCase ) // 2 _lowerCAmelCase =arr[0:mid] _lowerCAmelCase =arr[mid:] _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =inversion_p + inversions_q + cross_inversions return c, num_inversions def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: _lowerCAmelCase =[] _lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0 while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__UpperCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__UpperCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _lowerCamelCase() -> str: _lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , __UpperCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) # an empty list should also have zero inversions _lowerCAmelCase =[] _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) if __name__ == "__main__": main()
341
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __A = logging.get_logger(__name__) class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None: warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
357
"""simple docstring""" import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = True lowerCamelCase = None lowerCamelCase = 1 lowerCamelCase = None lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None def _lowerCAmelCase ( self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
341
0
from __future__ import annotations from decimal import Decimal from numpy import array def _lowerCamelCase(__UpperCamelCase ) -> list[list[float]]: _lowerCAmelCase =Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(__UpperCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix _lowerCAmelCase =float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creates a copy of the matrix with swapped positions of the elements _lowerCAmelCase =[[0.0, 0.0], [0.0, 0.0]] _lowerCAmelCase , _lowerCAmelCase =matrix[1][1], matrix[0][0] _lowerCAmelCase , _lowerCAmelCase =-matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(__UpperCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(__UpperCamelCase ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule _lowerCAmelCase =float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creating cofactor matrix _lowerCAmelCase =[ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] _lowerCAmelCase =(d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) _lowerCAmelCase =-( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) _lowerCAmelCase =(d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) _lowerCAmelCase =-( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) _lowerCAmelCase =(d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) _lowerCAmelCase =-( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) _lowerCAmelCase =(d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) _lowerCAmelCase =-( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) _lowerCAmelCase =(d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) _lowerCAmelCase =array(__UpperCamelCase ) for i in range(3 ): for j in range(3 ): _lowerCAmelCase =cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix _lowerCAmelCase =array(__UpperCamelCase ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(__UpperCamelCase ) # Calculate the inverse of the matrix return [[float(d(__UpperCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
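# --- Illustrative sketch (the 2x2 branch of the converter above, de-obfuscated) ---
# For a 2x2 matrix the inverse is the swapped/negated adjugate divided by the
# determinant; multiplying back should give the identity up to float error:
import numpy as np


def inverse_2x2(m: list) -> list:
    det = m[0][0] * m[1][1] - m[0][1] * m[1][0]
    if det == 0:
        raise ValueError("This matrix has no inverse.")
    return [
        [m[1][1] / det, -m[0][1] / det],
        [-m[1][0] / det, m[0][0] / det],
    ]


matrix = [[2.0, 5.0], [1.0, 3.0]]  # determinant 1, so the inverse is exact
assert np.allclose(np.array(matrix) @ np.array(inverse_2x2(matrix)), np.eye(2))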
358
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int: return int((input_a, input_a).count(1 ) != 0 ) def _lowerCamelCase() -> None: assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
341
0
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class lowerCamelCase__ ( __magic_name__ , __magic_name__ ): '''simple docstring''' @register_to_config def __init__( self , __UpperCAmelCase = 7_68 , ) -> Dict: super().__init__() _lowerCAmelCase =nn.Parameter(torch.zeros(1 , __UpperCAmelCase ) ) _lowerCAmelCase =nn.Parameter(torch.ones(1 , __UpperCAmelCase ) ) def _lowerCAmelCase ( self , __UpperCAmelCase = None , __UpperCAmelCase = None , ) -> Any: _lowerCAmelCase =nn.Parameter(self.mean.to(__UpperCAmelCase ).to(__UpperCAmelCase ) ) _lowerCAmelCase =nn.Parameter(self.std.to(__UpperCAmelCase ).to(__UpperCAmelCase ) ) return self def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase =(embeds - self.mean) * 1.0 / self.std return embeds def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Any: _lowerCAmelCase =(embeds * self.std) + self.mean return embeds
359
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py __A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' __A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' __A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple: _lowerCAmelCase =compute_bleu( reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
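# --- Illustrative sketch (one building block of BLEU, not the nmt_bleu code above) ---
# BLEU's n-gram precisions are "clipped": each candidate n-gram counts at most
# as often as it appears in the most generous reference. A minimal unigram
# version of that clipping step:
from collections import Counter


def clipped_unigram_precision(prediction: list, references: list) -> float:
    pred_counts = Counter(prediction)
    max_ref_counts = Counter()
    for ref in references:
        for token, n in Counter(ref).items():
            max_ref_counts[token] = max(max_ref_counts[token], n)
    clipped = sum(min(n, max_ref_counts[token]) for token, n in pred_counts.items())
    return clipped / max(len(prediction), 1)


assert clipped_unigram_precision(["the", "the", "cat"], [["the", "cat"]]) == 2 / 3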
341
0
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def _lowerCamelCase(__UpperCamelCase ) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__UpperCamelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _lowerCamelCase() -> Iterator[int]: _lowerCAmelCase =2 while True: if is_prime(__UpperCamelCase ): yield num num += 1 def _lowerCamelCase(__UpperCamelCase = 2000000 ) -> int: return sum(takewhile(lambda __UpperCamelCase : x < n , prime_generator() ) ) if __name__ == "__main__": print(F"""{solution() = }""")
360
"""simple docstring""" import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=512, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def _lowerCamelCase(__UpperCamelCase ) -> List[str]: if string == "True": return True elif string == "False": return False else: raise ValueError(F'''could not parse string as bool {string}''' ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) __A = parser.parse_args() __A = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
341
0
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: _lowerCAmelCase =os.path.join(args.tf_model_dir , """parameters.json""" ) _lowerCAmelCase =json.loads(open(__UpperCamelCase ).read() ) if not params: raise ValueError( F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' ) if not args.output.endswith(""".pt""" ): _lowerCAmelCase =args.output + """.pt""" _lowerCAmelCase =OrderedDict() with tf.device("""/CPU:0""" ): _lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir ) _lowerCAmelCase =reader.get_variable_to_shape_map() for key_name in shapes.keys(): _lowerCAmelCase =reader.get_tensor(__UpperCamelCase ).astype(np.floataa ) if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ): continue if key_name.startswith("""pasts/""" ): if key_name.startswith("""pasts/mlp""" ): _lowerCAmelCase =int(key_name[9] ) elif key_name.startswith("""pasts/out""" ): _lowerCAmelCase =8 _lowerCAmelCase ="""model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time _lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name.startswith("""model/moe""" ): _lowerCAmelCase =int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/switch_gating/kernel""" ): _lowerCAmelCase ="""model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player _lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/softmlp/kernel""" ): _lowerCAmelCase ="""model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player _lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ): _lowerCAmelCase =key_name[-9:-7] for i in range(16 ): _lowerCAmelCase ="""model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer) _lowerCAmelCase =( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name.startswith("""model/mlp""" ): _lowerCAmelCase =int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/p1/kernel""" ): _lowerCAmelCase ="""model.blocks.%d.feed_forward.mlp.wi.weight""" % player _lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/p1/bias""" ): _lowerCAmelCase ="""model.blocks.%d.feed_forward.mlp.wi.bias""" % player _lowerCAmelCase =vnp.copy() # same because it is one dimensional _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/p2/kernel""" ): _lowerCAmelCase ="""model.blocks.%d.feed_forward.mlp.wo.weight""" % player _lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/p2/bias""" ): _lowerCAmelCase ="""model.blocks.%d.feed_forward.mlp.wo.bias""" % player _lowerCAmelCase =vnp.copy() # same because it is one dimensional _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name.startswith("""model/ln""" ): _lowerCAmelCase =int(key_name[8:].split("""/""" )[0] ) 
if key_name.endswith("""/b""" ): _lowerCAmelCase ="""model.blocks.%d.feed_forward.norm.bias""" % player _lowerCAmelCase =vnp.copy() # same because it is one dimensional _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/g""" ): _lowerCAmelCase ="""model.blocks.%d.feed_forward.norm.weight""" % player _lowerCAmelCase =vnp.copy() # same because it is one dimensional _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name.startswith("""model/att""" ): _lowerCAmelCase =int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/qkv/kernel""" ): _lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum _lowerCAmelCase =state[:, 0, :, :] _lowerCAmelCase =state[:, 1, :, :] _lowerCAmelCase =state[:, 2, :, :] _lowerCAmelCase =( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase =( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase =( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase ="""model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player _lowerCAmelCase =torch.tensor(__UpperCamelCase ) _lowerCAmelCase ="""model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player _lowerCAmelCase =torch.tensor(__UpperCamelCase ) _lowerCAmelCase ="""model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/o/kernel""" ): _lowerCAmelCase ="""model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player _lowerCAmelCase =( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name.startswith("""model/an""" ): _lowerCAmelCase =int(key_name[8:].split("""/""" )[0] ) if key_name.endswith("""/b""" ): _lowerCAmelCase ="""model.blocks.%d.self_attn.norm.bias""" % player _lowerCAmelCase =vnp.copy() # same because it is one dimensional _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/g""" ): _lowerCAmelCase ="""model.blocks.%d.self_attn.norm.weight""" % player _lowerCAmelCase =vnp.copy() # same because it is one dimensional _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif ( key_name.startswith("""model/wte""" ) or key_name.startswith("""model/wpe""" ) or key_name.startswith("""model/ete""" ) ): _lowerCAmelCase ={"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[ key_name[-3:] ] _lowerCAmelCase ="""model.%s.weight""" % nlayer _lowerCAmelCase =vnp.copy() # same in embedded _lowerCAmelCase =torch.tensor(__UpperCamelCase ) if key_name.startswith("""model/wte""" ): _lowerCAmelCase ="""lm_head.weight""" _lowerCAmelCase =vnp.copy() # same in embedded _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name.startswith("""model/wob""" ): _lowerCAmelCase ="""final_logits_bias""" _lowerCAmelCase =vnp.copy() # same in embedded _lowerCAmelCase =state.reshape((1, -1) ) _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif key_name == "model/dense/kernel": _lowerCAmelCase ="""model.last_project.weight""" _lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase =torch.tensor(__UpperCamelCase ) elif 
key_name == "model/dense_1/bias": _lowerCAmelCase ="""model.last_project.bias""" _lowerCAmelCase =vnp.copy() # same because it is one dimensional _lowerCAmelCase =torch.tensor(__UpperCamelCase ) torch.save(__UpperCamelCase , args.output ) if __name__ == "__main__": __A = argparse.ArgumentParser( description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model') parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model') __A = parser.parse_args() convert_tf_gptsan_to_pt(args)
361
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __A = { 'configuration_audio_spectrogram_transformer': [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ASTConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ASTForAudioClassification', 'ASTModel', 'ASTPreTrainedModel', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['ASTFeatureExtractor'] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = LEDConfig lowerCamelCase = {} lowerCamelCase = '''gelu''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=4 , ) -> List[Any]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =hidden_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =intermediate_size _lowerCAmelCase =hidden_dropout_prob _lowerCAmelCase =attention_probs_dropout_prob _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =eos_token_id _lowerCAmelCase =pad_token_id _lowerCAmelCase =bos_token_id _lowerCAmelCase =attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after _lowerCAmelCase =self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _lowerCAmelCase =( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _lowerCAmelCase =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _lowerCAmelCase =tf.concat([input_ids, eos_tensor] , axis=1 ) _lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _lowerCAmelCase =prepare_led_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _lowerCAmelCase =tf.concat( [tf.zeros_like(__UpperCAmelCase )[:, :-1], tf.ones_like(__UpperCAmelCase )[:, -1:]] , axis=-1 , ) _lowerCAmelCase =global_attention_mask return config, inputs_dict def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =TFLEDModel(config=__UpperCAmelCase ).get_decoder() _lowerCAmelCase =inputs_dict["""input_ids"""] _lowerCAmelCase =input_ids[:1, :] _lowerCAmelCase =inputs_dict["""attention_mask"""][:1, :] _lowerCAmelCase =1 # first forward pass _lowerCAmelCase =model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowerCAmelCase =ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCAmelCase =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _lowerCAmelCase =tf.concat([input_ids, next_tokens] , axis=-1 ) _lowerCAmelCase =tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _lowerCAmelCase =model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _lowerCAmelCase =model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _lowerCAmelCase =int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _lowerCAmelCase =output_from_no_past[:, -3:, random_slice_idx] _lowerCAmelCase =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1e-3 ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ) -> Tuple: if attention_mask is None: _lowerCAmelCase =tf.cast(tf.math.not_equal(__UpperCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _lowerCAmelCase =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if 
head_mask is None: _lowerCAmelCase =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowerCAmelCase =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () lowerCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else () lowerCamelCase = ( { '''conversational''': TFLEDForConditionalGeneration, '''feature-extraction''': TFLEDModel, '''summarization''': TFLEDForConditionalGeneration, '''text2text-generation''': TFLEDForConditionalGeneration, '''translation''': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) lowerCamelCase = True lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =TFLEDModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Any: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase =tf.zeros_like(inputs_dict["""attention_mask"""] ) _lowerCAmelCase =2 _lowerCAmelCase =tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , ) _lowerCAmelCase =True _lowerCAmelCase =self.model_tester.seq_length _lowerCAmelCase =self.model_tester.encoder_seq_length def check_decoder_attentions_output(__UpperCAmelCase ): _lowerCAmelCase =outputs.decoder_attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(__UpperCAmelCase ): _lowerCAmelCase =[t.numpy() for t in outputs.encoder_attentions] _lowerCAmelCase =[t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _lowerCAmelCase =True _lowerCAmelCase =False _lowerCAmelCase =False _lowerCAmelCase =model_class(__UpperCAmelCase ) _lowerCAmelCase =model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) _lowerCAmelCase =len(__UpperCAmelCase ) self.assertEqual(config.output_hidden_states , __UpperCAmelCase ) check_encoder_attentions_output(__UpperCAmelCase ) if self.is_encoder_decoder: _lowerCAmelCase =model_class(__UpperCAmelCase ) _lowerCAmelCase =model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) 
self.assertEqual(config.output_hidden_states , __UpperCAmelCase ) check_decoder_attentions_output(__UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _lowerCAmelCase =True _lowerCAmelCase =model_class(__UpperCAmelCase ) _lowerCAmelCase =model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , __UpperCAmelCase ) check_encoder_attentions_output(__UpperCAmelCase ) # Check attention is always last and order is fine _lowerCAmelCase =True _lowerCAmelCase =True _lowerCAmelCase =model_class(__UpperCAmelCase ) _lowerCAmelCase =model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , __UpperCAmelCase ) check_encoder_attentions_output(__UpperCAmelCase ) @unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" ) def _lowerCAmelCase ( self ) -> List[str]: pass def _lowerCAmelCase ( self ) -> str: # TODO: Head-masking not yet implement pass def _lowerCamelCase(__UpperCamelCase ) -> Tuple: return tf.constant(__UpperCamelCase , dtype=tf.intaa ) __A = 1E-4 @slow @require_tf class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led # change to intended input here _lowerCAmelCase =_long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) _lowerCAmelCase =_long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) _lowerCAmelCase =prepare_led_inputs_dict(model.config , __UpperCAmelCase , __UpperCAmelCase ) _lowerCAmelCase =model(**__UpperCAmelCase )[0] _lowerCAmelCase =(1, 10_24, 7_68) self.assertEqual(output.shape , __UpperCAmelCase ) # change to expected output here _lowerCAmelCase =tf.convert_to_tensor( [[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1e-3 ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ) # change to intended input here _lowerCAmelCase =_long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) _lowerCAmelCase =_long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) _lowerCAmelCase =prepare_led_inputs_dict(model.config , __UpperCAmelCase , __UpperCAmelCase ) _lowerCAmelCase =model(**__UpperCAmelCase )[0] _lowerCAmelCase =(1, 10_24, model.config.vocab_size) self.assertEqual(output.shape , __UpperCAmelCase ) # change to expected output here _lowerCAmelCase =tf.convert_to_tensor( [[33.65_07, 6.4_5_7_2, 16.80_89], [5.8_7_3_9, -2.4_2_3_8, 11.29_02], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1e-3 , rtol=1e-3 )
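# --- Illustrative sketch (the random-slice comparison used in the cache test above) ---
# Instead of comparing whole logits tensors, the test checks a randomly chosen
# slice of the with-cache vs. without-cache outputs. The bare pattern, with
# toy arrays standing in for model outputs:
import numpy as np

rng = np.random.default_rng(0)
output_no_past = rng.normal(size=(2, 8, 16)).astype(np.float32)  # full forward pass
output_with_past = output_no_past[:, -3:, :].copy()              # incremental pass over the last 3 tokens
random_idx = int(rng.integers(0, output_with_past.shape[-1]))
np.testing.assert_allclose(
    output_no_past[:, -3:, random_idx], output_with_past[:, :, random_idx], rtol=1e-3
)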
362
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { 'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'], 'tokenization_m2m_100': ['M2M100Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST', 'M2M100ForConditionalGeneration', 'M2M100Model', 'M2M100PreTrainedModel', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
0
"""simple docstring""" import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' def __init__( self ) -> Optional[Any]: _lowerCAmelCase =[] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: self.events.append("""on_init_end""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Any: self.events.append("""on_train_begin""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Any: self.events.append("""on_train_end""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> str: self.events.append("""on_epoch_begin""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> int: self.events.append("""on_epoch_end""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Dict: self.events.append("""on_step_begin""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Dict: self.events.append("""on_step_end""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: self.events.append("""on_evaluate""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: self.events.append("""on_predict""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: self.events.append("""on_save""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Any: self.events.append("""on_log""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: self.events.append("""on_prediction_step""" ) @require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =tempfile.mkdtemp() def _lowerCAmelCase ( self ) -> Any: shutil.rmtree(self.output_dir ) def _lowerCAmelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=64 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase=False , **__UpperCAmelCase ) -> List[Any]: # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
_lowerCAmelCase =RegressionDataset(length=__UpperCAmelCase ) _lowerCAmelCase =RegressionDataset(length=__UpperCAmelCase ) _lowerCAmelCase =RegressionModelConfig(a=__UpperCAmelCase , b=__UpperCAmelCase ) _lowerCAmelCase =RegressionPreTrainedModel(__UpperCAmelCase ) _lowerCAmelCase =TrainingArguments(self.output_dir , disable_tqdm=__UpperCAmelCase , report_to=[] , **__UpperCAmelCase ) return Trainer( __UpperCAmelCase , __UpperCAmelCase , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , callbacks=__UpperCAmelCase , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) # Order doesn't matter _lowerCAmelCase =sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : cb.__name__ if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cb.__class__.__name__ ) _lowerCAmelCase =sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : cb.__name__ if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cb.__class__.__name__ ) for cba, cba in zip(__UpperCAmelCase , __UpperCAmelCase ): if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertEqual(__UpperCAmelCase , cba.__class__ ) elif not isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertEqual(cba.__class__ , __UpperCAmelCase ) else: self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: _lowerCAmelCase =["""on_init_end""", """on_train_begin"""] _lowerCAmelCase =0 _lowerCAmelCase =len(trainer.get_eval_dataloader() ) _lowerCAmelCase =["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs ): expected_events.append("""on_epoch_begin""" ) for _ in range(__UpperCAmelCase ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("""on_log""" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("""on_save""" ) expected_events.append("""on_epoch_end""" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =self.get_trainer() _lowerCAmelCase =DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) # Callbacks passed at init are added to the default callbacks _lowerCAmelCase =self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(__UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback _lowerCAmelCase =self.get_trainer(disable_tqdm=__UpperCAmelCase ) _lowerCAmelCase =DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase 
=DEFAULT_CALLBACKS.copy() + [ProgressCallback] _lowerCAmelCase =self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(__UpperCAmelCase ) expected_callbacks.remove(__UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) _lowerCAmelCase =self.get_trainer() _lowerCAmelCase =trainer.pop_callback(__UpperCAmelCase ) self.assertEqual(cb.__class__ , __UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) trainer.add_callback(__UpperCAmelCase ) expected_callbacks.insert(0 , __UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) # We can also add, pop, or remove by instance _lowerCAmelCase =self.get_trainer() _lowerCAmelCase =trainer.callback_handler.callbacks[0] trainer.remove_callback(__UpperCAmelCase ) expected_callbacks.remove(__UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) _lowerCAmelCase =self.get_trainer() _lowerCAmelCase =trainer.callback_handler.callbacks[0] _lowerCAmelCase =trainer.pop_callback(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) trainer.add_callback(__UpperCAmelCase ) expected_callbacks.insert(0 , __UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[Any]: import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="""ignore""" , category=__UpperCAmelCase ) _lowerCAmelCase =self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() _lowerCAmelCase =trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) ) # Independent log/save/eval _lowerCAmelCase =self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() _lowerCAmelCase =trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) ) _lowerCAmelCase =self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() _lowerCAmelCase =trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) ) _lowerCAmelCase =self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" ) trainer.train() _lowerCAmelCase =trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) ) _lowerCAmelCase =self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" ) trainer.train() _lowerCAmelCase =trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) ) # A bit of everything _lowerCAmelCase =self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , ) trainer.train() _lowerCAmelCase =trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) ) # warning should be emitted for duplicated callbacks with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock: _lowerCAmelCase =self.get_trainer( 
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(__UpperCAmelCase ) in warn_mock.call_args[0][0]
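# --- Illustrative sketch (the event-recording trick behind the test callback above) ---
# The callback appends its own hook name on every call; asserting on the
# recorded sequence pins down the trainer's control flow. A framework-free
# version of the same idea:
class EventRecorder:
    def __init__(self):
        self.events = []

    def __getattr__(self, name):
        # Only reached when normal lookup fails; any on_* hook records itself.
        if name.startswith("on_"):
            return lambda *args, **kwargs: self.events.append(name)
        raise AttributeError(name)


recorder = EventRecorder()
recorder.on_train_begin()
recorder.on_step_end()
assert recorder.events == ["on_train_begin", "on_step_end"]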
363
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __A = datasets.logging.get_logger(__name__) __A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' __A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' __A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict: _lowerCAmelCase ={doc: key_lines} _lowerCAmelCase ={doc: sys_lines} _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) if remove_nested: _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' 
"""files, respectively""" ) return doc_coref_infos def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: _lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 for name, metric in metrics: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _lowerCAmelCase =(conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''' ) output_scores.update({"""conll_score""": conll} ) return output_scores def _lowerCamelCase(__UpperCamelCase ) -> Tuple: _lowerCAmelCase =False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: _lowerCAmelCase =line.split()[5] if not parse_col == "-": _lowerCAmelCase =True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]: _lowerCAmelCase =[ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _lowerCAmelCase =evaluate( key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , ) return score
341
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = KandinskyVaaInpaintPipeline lowerCamelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image'''] lowerCamelCase = [ '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''', ] lowerCamelCase = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowerCamelCase = False @property def _lowerCAmelCase ( self ) -> Union[str, Any]: return 32 @property def _lowerCAmelCase ( self ) -> Optional[int]: return 32 @property def _lowerCAmelCase ( self ) -> List[Any]: return self.time_input_dim @property def _lowerCAmelCase ( self ) -> Tuple: return self.time_input_dim * 4 @property def _lowerCAmelCase ( self ) -> int: return 1_00 @property def _lowerCAmelCase ( self ) -> Any: torch.manual_seed(0 ) _lowerCAmelCase ={ """in_channels""": 9, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } _lowerCAmelCase =UNetaDConditionModel(**__UpperCAmelCase ) return model @property def _lowerCAmelCase ( self ) -> Optional[int]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) _lowerCAmelCase =VQModel(**self.dummy_movq_kwargs ) return model def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.dummy_unet _lowerCAmelCase =self.dummy_movq _lowerCAmelCase =DDIMScheduler( num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__UpperCAmelCase , ) _lowerCAmelCase ={ """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Optional[Any]: _lowerCAmelCase 
=floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _lowerCAmelCase =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __UpperCAmelCase ) # create init_image _lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _lowerCAmelCase =image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((2_56, 2_56) ) # create mask _lowerCAmelCase =np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase =0 if str(__UpperCAmelCase ).startswith("""mps""" ): _lowerCAmelCase =torch.manual_seed(__UpperCAmelCase ) else: _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _lowerCAmelCase ={ """image""": init_image, """mask_image""": mask, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 2, """guidance_scale""": 4.0, """output_type""": """np""", } return inputs def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ="""cpu""" _lowerCAmelCase =self.get_dummy_components() _lowerCAmelCase =self.pipeline_class(**__UpperCAmelCase ) _lowerCAmelCase =pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase =pipe(**self.get_dummy_inputs(__UpperCAmelCase ) ) _lowerCAmelCase =output.images _lowerCAmelCase =pipe( **self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0] _lowerCAmelCase =image[0, -3:, -3:, -1] _lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1] print(f'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase =np.array( [0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def _lowerCAmelCase ( self ) -> str: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" ) _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) _lowerCAmelCase =np.ones((7_68, 7_68) , dtype=np.floataa ) _lowerCAmelCase =0 _lowerCAmelCase ="""a hat""" _lowerCAmelCase =KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__UpperCAmelCase ) _lowerCAmelCase =KandinskyVaaInpaintPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa ) _lowerCAmelCase =pipeline.to(__UpperCAmelCase ) pipeline.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase 
=torch.Generator(device="""cpu""" ).manual_seed(0 ) _lowerCAmelCase , _lowerCAmelCase =pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() _lowerCAmelCase =pipeline( image=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
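# Sketch (editor's addition): in both tests above the name mangling swallowed the mask
# indexing -- an all-ones array is built and then a bare "=0" follows. The original
# presumably zeroed out a rectangular region; in this version of the pipeline a 0
# appears to mark pixels to repaint while a 1 keeps the original image. A hypothetical
# reconstruction for the 64x64 dummy mask (the exact region is an assumption):
import numpy as np

mask = np.ones((64, 64), dtype=np.float32)
mask[:32, :16] = 0  # assumed region to be inpainted
assert mask.sum() == 64 * 64 - 32 * 16  # 3584 pixels kept as-is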
364
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = XGLMConfig lowerCamelCase = {} lowerCamelCase = '''gelu''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_input_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =ffn_dim _lowerCAmelCase =activation_function _lowerCAmelCase =activation_dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =initializer_range _lowerCAmelCase =None _lowerCAmelCase =0 _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Dict: return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _lowerCAmelCase =None if self.use_input_mask: _lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase =self.get_config() _lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self ) -> str: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) =config_and_inputs _lowerCAmelCase ={ """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': 
TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =TFXGLMModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 ) def _lowerCAmelCase ( self ) -> int: self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def _lowerCAmelCase ( self ) -> Union[str, Any]: super().test_resize_token_embeddings() @require_tf class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) _lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) _lowerCAmelCase =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] ) _lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase ="""left""" # use different length sentences to test batching _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase ) _lowerCAmelCase =inputs["""input_ids"""] _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
341
0
"""simple docstring""" import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup __A = logging.get_logger(__name__) class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' def __init__( self , **__UpperCAmelCase ) -> List[str]: requires_backends(self , ["""bs4"""] ) super().__init__(**__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: _lowerCAmelCase =[] _lowerCAmelCase =[] _lowerCAmelCase =element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag _lowerCAmelCase =parent.find_all(child.name , recursive=__UpperCAmelCase ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(__UpperCAmelCase ) else next(i for i, s in enumerate(__UpperCAmelCase , 1 ) if s is child ) ) _lowerCAmelCase =parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Dict: _lowerCAmelCase =BeautifulSoup(__UpperCAmelCase , """html.parser""" ) _lowerCAmelCase =[] _lowerCAmelCase =[] _lowerCAmelCase =[] for element in html_code.descendants: if type(__UpperCAmelCase ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue _lowerCAmelCase =html.unescape(__UpperCAmelCase ).strip() if not text_in_this_tag: continue all_doc_strings.append(__UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase =self.xpath_soup(__UpperCAmelCase ) stringaxtag_seq.append(__UpperCAmelCase ) stringaxsubs_seq.append(__UpperCAmelCase ) if len(__UpperCAmelCase ) != len(__UpperCAmelCase ): raise ValueError("""Number of doc strings and xtags does not correspond""" ) if len(__UpperCAmelCase ) != len(__UpperCAmelCase ): raise ValueError("""Number of doc strings and xsubs does not correspond""" ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: _lowerCAmelCase ="""""" for tagname, subs in zip(__UpperCAmelCase , __UpperCAmelCase ): xpath += f'''/{tagname}''' if subs != 0: xpath += f'''[{subs}]''' return xpath def __call__( self , __UpperCAmelCase ) -> BatchFeature: _lowerCAmelCase =False # Check that strings has a valid type if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _lowerCAmelCase =True elif isinstance(__UpperCAmelCase , (list, tuple) ): if len(__UpperCAmelCase ) == 0 or isinstance(html_strings[0] , __UpperCAmelCase ): _lowerCAmelCase =True if not valid_strings: raise ValueError( """HTML strings must of type `str`, `List[str]` (batch of examples), """ f'''but is of type {type(__UpperCAmelCase )}.''' ) _lowerCAmelCase =bool(isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(html_strings[0] , __UpperCAmelCase )) ) if not is_batched: _lowerCAmelCase =[html_strings] # Get nodes + xpaths _lowerCAmelCase =[] _lowerCAmelCase =[] for html_string in html_strings: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self.get_three_from_single(__UpperCAmelCase ) nodes.append(__UpperCAmelCase ) _lowerCAmelCase =[] for node, tag_list, sub_list in zip(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowerCAmelCase =self.construct_xpath(__UpperCAmelCase , __UpperCAmelCase ) xpath_strings.append(__UpperCAmelCase ) xpaths.append(__UpperCAmelCase ) # return as Dict _lowerCAmelCase ={"""nodes""": nodes, """xpaths""": xpaths} _lowerCAmelCase =BatchFeature(data=__UpperCAmelCase , 
tensor_type=__UpperCAmelCase ) return encoded_inputs
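# Sketch (editor's addition): the extractor above (it mirrors the MarkupLM-style HTML
# feature extractor) rebuilds an XPath such as /html/body/p[2] from (tag, subscript)
# pairs. The same walk, standalone, using plain bs4; subscript 0 means the tag is the
# only child with that name, matching the class's convention:
from bs4 import BeautifulSoup

soup = BeautifulSoup("<html><body><p>one</p><p>two</p></body></html>", "html.parser")
child = soup.find_all("p")[1]  # the second <p>

parts = []
for parent in child.parents:
    siblings = parent.find_all(child.name, recursive=False)
    sub = 0 if len(siblings) == 1 else next(i for i, s in enumerate(siblings, 1) if s is child)
    parts.append((child.name, sub))
    child = parent
parts.reverse()

print("".join(f"/{tag}" + (f"[{sub}]" if sub != 0 else "") for tag, sub in parts))
# /html/body/p[2]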
365
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A = logging.get_logger(__name__) __A = {'vocab_file': 'spiece.model'} __A = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } __A = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) __A = 0 __A = 1 __A = 2 __A = 3 __A = 4 class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = '''left''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =3 _lowerCAmelCase =do_lower_case _lowerCAmelCase =remove_space _lowerCAmelCase =keep_accents _lowerCAmelCase =vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> str: return len(self.sp_model ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[int]: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None return state def __setstate__( self , __UpperCAmelCase ) -> Tuple: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: if self.remove_space: _lowerCAmelCase =""" """.join(inputs.strip().split() ) else: _lowerCAmelCase =inputs _lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase ) _lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] ) if self.do_lower_case: _lowerCAmelCase =outputs.lower() return outputs def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: 
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase ) _lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) _lowerCAmelCase =[] for piece in pieces: if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCAmelCase =cur_pieces[1:] else: _lowerCAmelCase =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__UpperCAmelCase ) else: new_pieces.append(__UpperCAmelCase ) return new_pieces def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: return self.sp_model.PieceToId(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.IdToPiece(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str: _lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase ) _lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 _lowerCAmelCase =[] _lowerCAmelCase =[] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) _lowerCAmelCase =[] sub_texts.append(__UpperCAmelCase ) else: current_sub_text.append(__UpperCAmelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens _lowerCAmelCase ="""""".join(__UpperCAmelCase ) _lowerCAmelCase =( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase ) return clean_text else: return text def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] return ([0] * len(__UpperCAmelCase )) + [1, 1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowerCAmelCase ( self , __UpperCAmelCase , 
__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
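# Sketch (editor's addition): unlike BERT-style tokenizers, the two methods above
# append XLNet's special tokens, so a sequence pair becomes `A <sep> B <sep> <cls>`
# and <cls> carries its own segment id (2). A dependency-free mirror of that layout:
def xlnet_layout(a, b=None):
    if b is None:
        return a + ["<sep>", "<cls>"], [0] * (len(a) + 1) + [2]
    tokens = a + ["<sep>"] + b + ["<sep>", "<cls>"]
    segments = [0] * (len(a) + 1) + [1] * (len(b) + 1) + [2]
    return tokens, segments

print(xlnet_layout(["hello", "world"], ["again"]))
# (['hello', 'world', '<sep>', 'again', '<sep>', '<cls>'], [0, 0, 0, 1, 1, 2])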
341
0
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> str: assert isinstance(__UpperCamelCase , __UpperCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict: _lowerCAmelCase =tmp_path / """cache""" _lowerCAmelCase ={"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _lowerCAmelCase =TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read() _check_text_dataset(__UpperCamelCase , __UpperCamelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: _lowerCAmelCase =tmp_path / """cache""" _lowerCAmelCase ={"""text""": """string"""} _lowerCAmelCase =features.copy() if features else default_expected_features _lowerCAmelCase =( Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _lowerCAmelCase =TextDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read() _check_text_dataset(__UpperCamelCase , __UpperCamelCase ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: _lowerCAmelCase =tmp_path / """cache""" _lowerCAmelCase ={"""text""": """string"""} _lowerCAmelCase =TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , split=__UpperCamelCase ).read() _check_text_dataset(__UpperCamelCase , __UpperCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: if issubclass(__UpperCamelCase , __UpperCamelCase ): _lowerCAmelCase =text_path elif issubclass(__UpperCamelCase , __UpperCamelCase ): _lowerCAmelCase =[text_path] _lowerCAmelCase =tmp_path / """cache""" _lowerCAmelCase ={"""text""": """string"""} _lowerCAmelCase =TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read() _check_text_dataset(__UpperCamelCase , __UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=("train",) ) -> List[str]: assert isinstance(__UpperCamelCase , __UpperCamelCase ) for split in splits: _lowerCAmelCase =dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict: _lowerCAmelCase =tmp_path / """cache""" _lowerCAmelCase ={"""text""": """string"""} with assert_arrow_memory_increases() if 
keep_in_memory else assert_arrow_memory_doesnt_increase(): _lowerCAmelCase =TextDatasetReader({"""train""": text_path} , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read() _check_text_datasetdict(__UpperCamelCase , __UpperCamelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple: _lowerCAmelCase =tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" _lowerCAmelCase ={"""text""": """string"""} _lowerCAmelCase =features.copy() if features else default_expected_features _lowerCAmelCase =( Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _lowerCAmelCase =TextDatasetReader({"""train""": text_path} , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read() _check_text_datasetdict(__UpperCamelCase , __UpperCamelCase ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: if split: _lowerCAmelCase ={split: text_path} else: _lowerCAmelCase ="""train""" _lowerCAmelCase ={"""train""": text_path, """test""": text_path} _lowerCAmelCase =tmp_path / """cache""" _lowerCAmelCase ={"""text""": """string"""} _lowerCAmelCase =TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read() _check_text_datasetdict(__UpperCamelCase , __UpperCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
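# Sketch (editor's addition): TextDatasetReader is the class behind the "text" loader;
# in user code the behaviour these tests assert is usually reached through
# load_dataset (the file name here is hypothetical):
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
print(ds.column_names)            # ['text'] -- one row per line of the file
print(ds.features["text"].dtype)  # 'string'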
366
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase ) -> bool: _lowerCAmelCase =str(__UpperCamelCase ) return n == n[::-1] def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> str: _lowerCAmelCase =0 for i in range(1 , __UpperCamelCase ): if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
341
0
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=32 * 4 , __UpperCAmelCase=32 * 6 , __UpperCAmelCase=4 , __UpperCAmelCase=32 , ) -> Any: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =is_training _lowerCAmelCase =use_auxiliary_loss _lowerCAmelCase =num_queries _lowerCAmelCase =num_channels _lowerCAmelCase =min_size _lowerCAmelCase =max_size _lowerCAmelCase =num_labels _lowerCAmelCase =mask_feature_size def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __UpperCAmelCase ) _lowerCAmelCase =torch.ones([self.batch_size, self.min_size, self.max_size] , device=__UpperCAmelCase ) _lowerCAmelCase =( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__UpperCAmelCase ) > 0.5 ).float() _lowerCAmelCase =(torch.rand((self.batch_size, self.num_labels) , device=__UpperCAmelCase ) > 0.5).long() _lowerCAmelCase =self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _lowerCAmelCase ( self ) -> int: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase ={"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: _lowerCAmelCase =output.encoder_hidden_states _lowerCAmelCase =output.pixel_decoder_hidden_states _lowerCAmelCase =output.transformer_decoder_hidden_states self.parent.assertTrue(len(__UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) , config.decoder_config.decoder_layers ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> Any: with torch.no_grad(): _lowerCAmelCase =MaskFormerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _lowerCAmelCase =model(pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase ) 
_lowerCAmelCase =model(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: _lowerCAmelCase =MaskFormerForInstanceSegmentation(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() def comm_check_on_output(__UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCAmelCase =model(pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) _lowerCAmelCase =model( pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () lowerCamelCase = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =MaskFormerModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> str: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase , **__UpperCAmelCase , output_hidden_states=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCAmelCase ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def _lowerCAmelCase ( self ) -> Dict: pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def _lowerCAmelCase ( self ) -> 
Optional[Any]: pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def _lowerCAmelCase ( self ) -> Tuple: pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def _lowerCAmelCase ( self ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def _lowerCAmelCase ( self ) -> List[Any]: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _lowerCAmelCase ( self ) -> List[str]: pass def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase =model_class(__UpperCAmelCase ) _lowerCAmelCase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase =[*signature.parameters.keys()] _lowerCAmelCase =["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: _lowerCAmelCase =MaskFormerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =(self.model_tester.min_size,) * 2 _lowerCAmelCase ={ """pixel_values""": torch.randn((2, 3, *size) , device=__UpperCAmelCase ), """mask_labels""": torch.randn((2, 10, *size) , device=__UpperCAmelCase ), """class_labels""": torch.zeros(2 , 10 , device=__UpperCAmelCase ).long(), } _lowerCAmelCase =MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__UpperCAmelCase ) _lowerCAmelCase =model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase , **__UpperCAmelCase , output_hidden_states=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase =model_class(__UpperCAmelCase ).to(__UpperCAmelCase ) _lowerCAmelCase =model(**__UpperCAmelCase , output_attentions=__UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def _lowerCAmelCase ( self ) -> Dict: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _lowerCAmelCase =self.all_model_classes[1] _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() _lowerCAmelCase =model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() _lowerCAmelCase =model(__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase ).loss loss.backward() def _lowerCAmelCase ( self ) -> Optional[Any]: # only MaskFormerForInstanceSegmentation has the loss _lowerCAmelCase =self.all_model_classes[1] _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() _lowerCAmelCase =True _lowerCAmelCase =True _lowerCAmelCase =model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() _lowerCAmelCase =model(__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase ) _lowerCAmelCase 
=outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCAmelCase =outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _lowerCAmelCase =outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCAmelCase =outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1E-4 def _lowerCamelCase() -> List[Any]: _lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase ( self ) -> Optional[Any]: return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__UpperCAmelCase ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) _lowerCAmelCase =inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) _lowerCAmelCase =torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) _lowerCAmelCase =torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) _lowerCAmelCase =torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) _lowerCAmelCase =inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) # masks_queries_logits _lowerCAmelCase =outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCAmelCase =[ 
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] _lowerCAmelCase =torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) # class_queries_logits _lowerCAmelCase =outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCAmelCase =torch.tensor( [ [1.65_12e00, -5.25_72e00, -3.35_19e00], [3.61_69e-02, -5.90_25e00, -2.93_13e00], [1.07_66e-04, -7.76_30e00, -5.12_63e00], ] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(__UpperCAmelCase ) .eval() ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) _lowerCAmelCase =inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) # masks_queries_logits _lowerCAmelCase =outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCAmelCase =[[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -10.77_11]] _lowerCAmelCase =torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) # class_queries_logits _lowerCAmelCase =outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCAmelCase =torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , ) _lowerCAmelCase =inputs["""pixel_values"""].to(__UpperCAmelCase ) _lowerCAmelCase =[el.to(__UpperCAmelCase ) for el in inputs["""mask_labels"""]] _lowerCAmelCase =[el.to(__UpperCAmelCase ) for el in inputs["""class_labels"""]] with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
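# Sketch (editor's addition): the gradient-flow test above relies on a general PyTorch
# pattern -- call .retain_grad() on non-leaf activations before backward(), then check
# that .grad was populated. Standalone:
import torch

x = torch.randn(2, 4, requires_grad=True)
hidden = x @ torch.randn(4, 3)  # non-leaf tensor: its grad is discarded by default
hidden.retain_grad()            # ask autograd to keep it
hidden.pow(2).sum().backward()
assert hidden.grad is not None and hidden.grad.shape == hidden.shape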
367
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''llama''' lowerCamelCase = ['''past_key_values'''] def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: _lowerCAmelCase =vocab_size _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =hidden_size _lowerCAmelCase =intermediate_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads # for backward compatibility if num_key_value_heads is None: _lowerCAmelCase =num_attention_heads _lowerCAmelCase =num_key_value_heads _lowerCAmelCase =hidden_act _lowerCAmelCase =initializer_range _lowerCAmelCase =rms_norm_eps _lowerCAmelCase =pretraining_tp _lowerCAmelCase =use_cache _lowerCAmelCase =rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> str: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'''got {self.rope_scaling}''' ) _lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase ) _lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
341
0
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase = 1000 ) -> int: _lowerCAmelCase , _lowerCAmelCase =1, 1 _lowerCAmelCase =[] for i in range(1 , n + 1 ): _lowerCAmelCase =prev_numerator + 2 * prev_denominator _lowerCAmelCase =prev_numerator + prev_denominator if len(str(__UpperCamelCase ) ) > len(str(__UpperCamelCase ) ): result.append(__UpperCamelCase ) _lowerCAmelCase =numerator _lowerCAmelCase =denominator return len(__UpperCamelCase ) if __name__ == "__main__": print(F"""{solution() = }""")
368
"""simple docstring""" import warnings from .generation import TFGenerationMixin class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' # warning at import time warnings.warn( '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will ''' '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
341
0
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
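For the two-digit case this yields exactly the four non-trivial digit-cancelling fractions, whose product reduces to 1/100:

assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]
assert solution() == 100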
369
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =decoder_seq_length # For common tests _lowerCAmelCase =self.decoder_seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_attention_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =d_model _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_ffn_dim _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =eos_token_id _lowerCAmelCase =bos_token_id _lowerCAmelCase =pad_token_id _lowerCAmelCase =decoder_start_token_id _lowerCAmelCase =use_cache _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =None _lowerCAmelCase =decoder_seq_length _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =None if self.use_attention_mask: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _lowerCAmelCase =None if self.use_labels: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]: _lowerCAmelCase =True _lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval() _lowerCAmelCase =input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _lowerCAmelCase =outputs["""past_key_values"""] # create hypothetical next token and extent to next_input_ids _lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # 
append to next input_ids and _lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""] _lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""] # select random slice _lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else () lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {} lowerCamelCase = True lowerCamelCase = False def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> List[str]: pass def _lowerCAmelCase ( self ) -> List[Any]: pass def _lowerCAmelCase ( self ) -> Any: pass def _lowerCAmelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: return @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def _lowerCAmelCase ( self ) -> str: pass
341
0
"""simple docstring""" from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter: _lowerCAmelCase =tau * frequency / samplerate _lowerCAmelCase =sin(__UpperCamelCase ) _lowerCAmelCase =cos(__UpperCamelCase ) _lowerCAmelCase =_sin / (2 * q_factor) _lowerCAmelCase =(1 - _cos) / 2 _lowerCAmelCase =1 - _cos _lowerCAmelCase =1 + alpha _lowerCAmelCase =-2 * _cos _lowerCAmelCase =1 - alpha _lowerCAmelCase =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter: _lowerCAmelCase =tau * frequency / samplerate _lowerCAmelCase =sin(__UpperCamelCase ) _lowerCAmelCase =cos(__UpperCamelCase ) _lowerCAmelCase =_sin / (2 * q_factor) _lowerCAmelCase =(1 + _cos) / 2 _lowerCAmelCase =-1 - _cos _lowerCAmelCase =1 + alpha _lowerCAmelCase =-2 * _cos _lowerCAmelCase =1 - alpha _lowerCAmelCase =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter: _lowerCAmelCase =tau * frequency / samplerate _lowerCAmelCase =sin(__UpperCamelCase ) _lowerCAmelCase =cos(__UpperCamelCase ) _lowerCAmelCase =_sin / (2 * q_factor) _lowerCAmelCase =_sin / 2 _lowerCAmelCase =0 _lowerCAmelCase =-ba _lowerCAmelCase =1 + alpha _lowerCAmelCase =-2 * _cos _lowerCAmelCase =1 - alpha _lowerCAmelCase =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter: _lowerCAmelCase =tau * frequency / samplerate _lowerCAmelCase =sin(__UpperCamelCase ) _lowerCAmelCase =cos(__UpperCamelCase ) _lowerCAmelCase =_sin / (2 * q_factor) _lowerCAmelCase =1 - alpha _lowerCAmelCase =-2 * _cos _lowerCAmelCase =1 + alpha _lowerCAmelCase =IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) , ) -> IIRFilter: _lowerCAmelCase =tau * frequency / samplerate _lowerCAmelCase =sin(__UpperCamelCase ) _lowerCAmelCase =cos(__UpperCamelCase ) _lowerCAmelCase =_sin / (2 * q_factor) _lowerCAmelCase =10 ** (gain_db / 40) _lowerCAmelCase =1 + alpha * big_a _lowerCAmelCase =-2 * _cos _lowerCAmelCase =1 - alpha * big_a _lowerCAmelCase =1 + alpha / big_a _lowerCAmelCase =-2 * _cos _lowerCAmelCase =1 - alpha / big_a _lowerCAmelCase =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) , ) -> IIRFilter: _lowerCAmelCase =tau * frequency / samplerate _lowerCAmelCase =sin(__UpperCamelCase ) _lowerCAmelCase =cos(__UpperCamelCase ) _lowerCAmelCase =_sin / (2 * q_factor) _lowerCAmelCase =10 ** (gain_db / 40) _lowerCAmelCase =(big_a + 1) - (big_a - 1) * _cos _lowerCAmelCase =(big_a + 1) + (big_a - 1) * _cos _lowerCAmelCase =(big_a - 1) - (big_a + 1) * _cos _lowerCAmelCase =(big_a - 1) + (big_a + 1) * _cos _lowerCAmelCase =2 * sqrt(__UpperCamelCase ) * alpha _lowerCAmelCase =big_a * (pmc + aaa) _lowerCAmelCase =2 * big_a * mpc _lowerCAmelCase =big_a * (pmc - aaa) _lowerCAmelCase =ppmc + aaa _lowerCAmelCase =-2 * pmpc _lowerCAmelCase =ppmc - aaa _lowerCAmelCase =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) 
return filt def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) , ) -> IIRFilter: _lowerCAmelCase =tau * frequency / samplerate _lowerCAmelCase =sin(__UpperCamelCase ) _lowerCAmelCase =cos(__UpperCamelCase ) _lowerCAmelCase =_sin / (2 * q_factor) _lowerCAmelCase =10 ** (gain_db / 40) _lowerCAmelCase =(big_a + 1) - (big_a - 1) * _cos _lowerCAmelCase =(big_a + 1) + (big_a - 1) * _cos _lowerCAmelCase =(big_a - 1) - (big_a + 1) * _cos _lowerCAmelCase =(big_a - 1) + (big_a + 1) * _cos _lowerCAmelCase =2 * sqrt(__UpperCamelCase ) * alpha _lowerCAmelCase =big_a * (ppmc + aaa) _lowerCAmelCase =-2 * big_a * pmpc _lowerCAmelCase =big_a * (ppmc - aaa) _lowerCAmelCase =pmc + aaa _lowerCAmelCase =2 * mpc _lowerCAmelCase =pmc - aaa _lowerCAmelCase =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
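The collapsed identifiers above make the coefficient assignments hard to follow, so as a reading aid here is a de-obfuscated sketch of just the low-pass branch, using the conventional biquad cookbook names (the variable names are reconstructions, not the dump's):

from math import cos, sin, sqrt, tau


def lowpass_coefficients(frequency: float, samplerate: int, q_factor: float = 1 / sqrt(2)):
    w0 = tau * frequency / samplerate  # normalized angular frequency
    alpha = sin(w0) / (2 * q_factor)
    b0 = b2 = (1 - cos(w0)) / 2  # feed-forward (b) coefficients
    b1 = 1 - cos(w0)
    a0, a1, a2 = 1 + alpha, -2 * cos(w0), 1 - alpha  # feedback (a) coefficients
    return [a0, a1, a2], [b0, b1, b2]


a_coeffs, b_coeffs = lowpass_coefficients(1_000, 48_000)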
370
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase = JukeboxTokenizer lowerCamelCase = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def _lowerCAmelCase ( self ) -> str: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 
45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def _lowerCAmelCase ( self ) -> Any: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 
35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
341
0
"""simple docstring""" from math import factorial def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int: # If either of the conditions are true, the function is being asked # to calculate a factorial of a negative number, which is not possible if n < k or k < 0: raise ValueError("""Please enter positive integers for n and k where n >= k""" ) return factorial(__UpperCamelCase ) // (factorial(__UpperCamelCase ) * factorial(n - k )) if __name__ == "__main__": print( 'The number of five-card hands possible from a standard', F"""fifty-two card deck is: {combinations(52, 5)}\n""", ) print( 'If a class of 40 students must be arranged into groups of', F"""4 for group projects, there are {combinations(40, 4)} ways""", 'to arrange them.\n', ) print( 'If 10 teams are competing in a Formula One race, there', F"""are {combinations(10, 3)} ways that first, second and""", 'third place can be awarded.', )
371
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = '▁' __A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} __A = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } __A = {'vinai/bartpho-syllable': 1024} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =vocab_file _lowerCAmelCase =monolingual_vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _lowerCAmelCase ={} _lowerCAmelCase =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =cnt cnt += 1 with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _lowerCAmelCase =line.strip().split()[0] _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) _lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Dict: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None _lowerCAmelCase =self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] _lowerCAmelCase =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 
False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCAmelCase ( self ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: return self.fairseq_ids_to_tokens[index] def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__UpperCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
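A hedged usage sketch; upstream this tokenizer ships as `transformers.BartphoTokenizer`, the checkpoint name comes from the pretrained-vocab map above, and the Vietnamese sample sentence is just an illustration:

from transformers import BartphoTokenizer

tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
input_ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]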
341
0
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> list: _lowerCAmelCase =[] _lowerCAmelCase , _lowerCAmelCase =input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) _lowerCAmelCase =result + left + right return input_list def _lowerCamelCase(__UpperCamelCase ) -> list: if len(__UpperCamelCase ) <= 1: return input_list _lowerCAmelCase =list(__UpperCamelCase ) # iteration for two-way merging _lowerCAmelCase =2 while p <= len(__UpperCamelCase ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase ): _lowerCAmelCase =i _lowerCAmelCase =i + p - 1 _lowerCAmelCase =(low + high + 1) // 2 _lowerCAmelCase =merge(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # final merge of last two parts if p * 2 >= len(__UpperCamelCase ): _lowerCAmelCase =i _lowerCAmelCase =merge(__UpperCamelCase , 0 , __UpperCamelCase , len(__UpperCamelCase ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": __A = input('Enter numbers separated by a comma:\n').strip() if user_input == "": __A = [] else: __A = [int(item.strip()) for item in user_input.split(',')] print(iter_merge_sort(unsorted))
350
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =1 _lowerCAmelCase =3 _lowerCAmelCase =(32, 32) _lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase ) return image @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , ) return model @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def _lowerCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _lowerCAmelCase =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , ) return CLIPTextModel(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , 
guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0] _lowerCAmelCase =image[0, -3:, -3:, -1] _lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1] _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 _lowerCAmelCase =unet.half() _lowerCAmelCase =text_encoder.half() # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase 
=sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 1e-3 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat_fp16.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowerCAmelCase ( self ) -> Optional[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , 
output_type="""np""" , ) _lowerCAmelCase =torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
341
0
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: # Load configuration defined in the metadata file with open(__UpperCamelCase ) as metadata_file: _lowerCAmelCase =json.load(__UpperCamelCase ) _lowerCAmelCase =LukeConfig(use_entity_aware_attention=__UpperCamelCase , **metadata["""model_config"""] ) # Load in the weights from the checkpoint_path _lowerCAmelCase =torch.load(__UpperCamelCase , map_location="""cpu""" )["""module"""] # Load the entity vocab file _lowerCAmelCase =load_original_entity_vocab(__UpperCamelCase ) # add an entry for [MASK2] _lowerCAmelCase =max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _lowerCAmelCase =XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] ) # Add special tokens to the token vocabulary for downstream tasks _lowerCAmelCase =AddedToken("""<ent>""" , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) _lowerCAmelCase =AddedToken("""<ent2>""" , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(__UpperCamelCase ) with open(os.path.join(__UpperCamelCase , """tokenizer_config.json""" ) , """r""" ) as f: _lowerCAmelCase =json.load(__UpperCamelCase ) _lowerCAmelCase ="""MLukeTokenizer""" with open(os.path.join(__UpperCamelCase , """tokenizer_config.json""" ) , """w""" ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) with open(os.path.join(__UpperCamelCase , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =MLukeTokenizer.from_pretrained(__UpperCamelCase ) # Initialize the embeddings of the special tokens _lowerCAmelCase =tokenizer.convert_tokens_to_ids(["""@"""] )[0] _lowerCAmelCase =tokenizer.convert_tokens_to_ids(["""#"""] )[0] _lowerCAmelCase =state_dict["""embeddings.word_embeddings.weight"""] _lowerCAmelCase =word_emb[ent_init_index].unsqueeze(0 ) _lowerCAmelCase =word_emb[enta_init_index].unsqueeze(0 ) _lowerCAmelCase =torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _lowerCAmelCase =state_dict[bias_name] _lowerCAmelCase =decoder_bias[ent_init_index].unsqueeze(0 ) _lowerCAmelCase =decoder_bias[enta_init_index].unsqueeze(0 ) _lowerCAmelCase =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _lowerCAmelCase =F'''encoder.layer.{layer_index}.attention.self.''' _lowerCAmelCase =state_dict[prefix + matrix_name] _lowerCAmelCase =state_dict[prefix + matrix_name] _lowerCAmelCase =state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _lowerCAmelCase =state_dict["""entity_embeddings.entity_embeddings.weight"""] _lowerCAmelCase =entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 ) _lowerCAmelCase 
=torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _lowerCAmelCase =state_dict["""entity_predictions.bias"""] _lowerCAmelCase =entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 ) _lowerCAmelCase =torch.cat([entity_prediction_bias, entity_mask_bias] ) _lowerCAmelCase =LukeForMaskedLM(config=__UpperCamelCase ).eval() state_dict.pop("""entity_predictions.decoder.weight""" ) state_dict.pop("""lm_head.decoder.weight""" ) state_dict.pop("""lm_head.decoder.bias""" ) _lowerCAmelCase =OrderedDict() for key, value in state_dict.items(): if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )): _lowerCAmelCase =state_dict[key] else: _lowerCAmelCase =state_dict[key] _lowerCAmelCase , _lowerCAmelCase =model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase ) if set(__UpperCamelCase ) != {"luke.embeddings.position_ids"}: raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' ) if set(__UpperCamelCase ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _lowerCAmelCase =MLukeTokenizer.from_pretrained(__UpperCamelCase , task="""entity_classification""" ) _lowerCAmelCase ="""ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).""" _lowerCAmelCase =(0, 9) _lowerCAmelCase =tokenizer(__UpperCamelCase , entity_spans=[span] , return_tensors="""pt""" ) _lowerCAmelCase =model(**__UpperCamelCase ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _lowerCAmelCase =torch.Size((1, 33, 768) ) _lowerCAmelCase =torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCamelCase , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _lowerCAmelCase =torch.Size((1, 1, 768) ) _lowerCAmelCase =torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' F''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __UpperCamelCase , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction _lowerCAmelCase =MLukeTokenizer.from_pretrained(__UpperCamelCase ) _lowerCAmelCase ="""Tokyo is the capital of <mask>.""" _lowerCAmelCase =(24, 30) _lowerCAmelCase =tokenizer(__UpperCamelCase , entity_spans=[span] , return_tensors="""pt""" ) _lowerCAmelCase =model(**__UpperCamelCase ) _lowerCAmelCase =encoding["""input_ids"""][0].tolist() _lowerCAmelCase =input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) ) _lowerCAmelCase =outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(__UpperCamelCase ) _lowerCAmelCase =outputs.entity_logits[0][0].argmax().item() _lowerCAmelCase =[ entity 
for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("""Saving PyTorch model to {}""".format(__UpperCamelCase ) ) model.save_pretrained(__UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]: _lowerCAmelCase =["""[MASK]""", """[PAD]""", """[UNK]"""] _lowerCAmelCase =[json.loads(__UpperCamelCase ) for line in open(__UpperCamelCase )] _lowerCAmelCase ={} for entry in data: _lowerCAmelCase =entry["""id"""] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _lowerCAmelCase =entity_id break _lowerCAmelCase =F'''{language}:{entity_name}''' _lowerCAmelCase =entity_id return new_mapping if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) __A = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
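A hypothetical invocation of the conversion script above; the flag names come from the argparse block, while the script filename and every path are placeholders:

# python convert_mluke_checkpoint.py \
#     --checkpoint_path mluke/pytorch_model.bin \
#     --metadata_path mluke/metadata.json \
#     --entity_vocab_path mluke/entity_vocab.jsonl \
#     --pytorch_dump_folder_path ./mluke-base \
#     --model_size base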
351
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''cvt''' def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) _lowerCAmelCase =num_channels _lowerCAmelCase =patch_sizes _lowerCAmelCase =patch_stride _lowerCAmelCase =patch_padding _lowerCAmelCase =embed_dim _lowerCAmelCase =num_heads _lowerCAmelCase =depth _lowerCAmelCase =mlp_ratio _lowerCAmelCase =attention_drop_rate _lowerCAmelCase =drop_rate _lowerCAmelCase =drop_path_rate _lowerCAmelCase =qkv_bias _lowerCAmelCase =cls_token _lowerCAmelCase =qkv_projection_method _lowerCAmelCase =kernel_qkv _lowerCAmelCase =padding_kv _lowerCAmelCase =stride_kv _lowerCAmelCase =padding_q _lowerCAmelCase =stride_q _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps
341
0
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase ) -> bool: _lowerCAmelCase =str(__UpperCamelCase ) return n == n[::-1] def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> str: _lowerCAmelCase =0 for i in range(1 , __UpperCamelCase ): if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
352
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = ['''image_processor''', '''tokenizer'''] lowerCamelCase = '''CLIPImageProcessor''' lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''') def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase =None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __UpperCAmelCase , ) _lowerCAmelCase =kwargs.pop("""feature_extractor""" ) _lowerCAmelCase =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if images is not None: _lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: _lowerCAmelCase =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =self.tokenizer.model_input_names _lowerCAmelCase =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
341
0
"""simple docstring""" import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def _lowerCamelCase() -> List[str]: with parallel_backend("""spark""" ): assert ParallelBackendConfig.backend_name == "spark" _lowerCAmelCase =[1, 2, 3] with pytest.raises(__UpperCamelCase ): with parallel_backend("""unsupported backend""" ): map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=2 ) with pytest.raises(__UpperCamelCase ): with parallel_backend("""unsupported backend""" ): map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("""num_proc""" , [2, -1] ) def _lowerCamelCase(__UpperCamelCase ) -> Tuple: _lowerCAmelCase =[1, 2] _lowerCAmelCase ={"""a""": 1, """b""": 2} _lowerCAmelCase ={"""a""": [1, 2], """b""": [3, 4]} _lowerCAmelCase ={"""a""": {"""1""": 1}, """b""": 2} _lowerCAmelCase ={"""a""": 1, """b""": 2, """c""": 3, """d""": 4} _lowerCAmelCase =[2, 3] _lowerCAmelCase ={"""a""": 2, """b""": 3} _lowerCAmelCase ={"""a""": [2, 3], """b""": [4, 5]} _lowerCAmelCase ={"""a""": {"""1""": 2}, """b""": 3} _lowerCAmelCase ={"""a""": 2, """b""": 3, """c""": 4, """d""": 5} with parallel_backend("""spark""" ): assert map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa assert map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa assert map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa assert map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa assert map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa
353
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['PerceiverFeatureExtractor'] __A = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =fname.split(os.path.sep )[-1] return re.search(R"""^(.*)_\d+\.jpg$""" , __UpperCamelCase ).groups()[0] class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Union[str, Any]: _lowerCAmelCase =file_names _lowerCAmelCase =image_transform _lowerCAmelCase =label_to_id def __len__( self ) -> Union[str, Any]: return len(self.file_names ) def __getitem__( self , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =self.file_names[idx] _lowerCAmelCase =PIL.Image.open(__UpperCAmelCase ) _lowerCAmelCase =raw_image.convert("""RGB""" ) if self.image_transform is not None: _lowerCAmelCase =self.image_transform(__UpperCAmelCase ) _lowerCAmelCase =extract_label(__UpperCAmelCase ) if self.label_to_id is not None: _lowerCAmelCase =self.label_to_id[label] return {"image": image, "label": label} def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: # Initialize accelerator if args.with_tracking: _lowerCAmelCase =Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir ) else: _lowerCAmelCase =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCAmelCase =config["""lr"""] _lowerCAmelCase =int(config["""num_epochs"""] ) _lowerCAmelCase =int(config["""seed"""] ) _lowerCAmelCase =int(config["""batch_size"""] ) _lowerCAmelCase =config["""image_size"""] if not isinstance(__UpperCamelCase , (list, tuple) ): _lowerCAmelCase =(image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , """isdigit""" ): if args.checkpointing_steps == "epoch": _lowerCAmelCase =args.checkpointing_steps elif args.checkpointing_steps.isdigit(): _lowerCAmelCase =int(args.checkpointing_steps ) else: raise ValueError( F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' ) else: _lowerCAmelCase =None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: _lowerCAmelCase =os.path.split(__UpperCamelCase )[-1].split(""".""" )[0] accelerator.init_trackers(__UpperCamelCase , __UpperCamelCase ) # Grab all the image filenames _lowerCAmelCase =[os.path.join(args.data_dir , __UpperCamelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )] # Build the label correspondences _lowerCAmelCase =[extract_label(__UpperCamelCase ) for fname in file_names] _lowerCAmelCase =list(set(__UpperCamelCase ) ) id_to_label.sort() _lowerCAmelCase ={lbl: i for i, lbl in enumerate(__UpperCamelCase )} # Set the seed before splitting the data. 
np.random.seed(__UpperCamelCase ) torch.manual_seed(__UpperCamelCase ) torch.cuda.manual_seed_all(__UpperCamelCase ) # Split our filenames between train and validation _lowerCAmelCase =np.random.permutation(len(__UpperCamelCase ) ) _lowerCAmelCase =int(0.8 * len(__UpperCamelCase ) ) _lowerCAmelCase =random_perm[:cut] _lowerCAmelCase =random_perm[cut:] # For training we use a simple RandomResizedCrop _lowerCAmelCase =Compose([RandomResizedCrop(__UpperCamelCase , scale=(0.5, 1.0) ), ToTensor()] ) _lowerCAmelCase =PetsDataset( [file_names[i] for i in train_split] , image_transform=__UpperCamelCase , label_to_id=__UpperCamelCase ) # For evaluation, we use a deterministic Resize _lowerCAmelCase =Compose([Resize(__UpperCamelCase ), ToTensor()] ) _lowerCAmelCase =PetsDataset([file_names[i] for i in eval_split] , image_transform=__UpperCamelCase , label_to_id=__UpperCamelCase ) # Instantiate dataloaders. _lowerCAmelCase =DataLoader(__UpperCamelCase , shuffle=__UpperCamelCase , batch_size=__UpperCamelCase , num_workers=4 ) _lowerCAmelCase =DataLoader(__UpperCamelCase , shuffle=__UpperCamelCase , batch_size=__UpperCamelCase , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCAmelCase =create_model("""resnet50d""" , pretrained=__UpperCamelCase , num_classes=len(__UpperCamelCase ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowerCAmelCase =model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): _lowerCAmelCase =False for param in model.get_classifier().parameters(): _lowerCAmelCase =True # We normalize the batches of images to be a bit faster. _lowerCAmelCase =torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device ) _lowerCAmelCase =torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer _lowerCAmelCase =torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler _lowerCAmelCase =OneCycleLR(optimizer=__UpperCamelCase , max_lr=__UpperCamelCase , epochs=__UpperCamelCase , steps_per_epoch=len(__UpperCamelCase ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =accelerator.prepare( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # We need to keep track of how many total steps we have iterated over _lowerCAmelCase =0 # We also need to keep track of the starting epoch so files are named properly _lowerCAmelCase =0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' ) accelerator.load_state(args.resume_from_checkpoint ) _lowerCAmelCase =os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint _lowerCAmelCase =[f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) _lowerCAmelCase =dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` _lowerCAmelCase =os.path.splitext(__UpperCamelCase )[0] if "epoch" in training_difference: _lowerCAmelCase =int(training_difference.replace("""epoch_""" , """""" ) ) + 1 _lowerCAmelCase =None else: _lowerCAmelCase =int(training_difference.replace("""step_""" , """""" ) ) _lowerCAmelCase =resume_step // len(__UpperCamelCase ) resume_step -= starting_epoch * len(__UpperCamelCase ) # Now we train the model for epoch in range(__UpperCamelCase , __UpperCamelCase ): model.train() if args.with_tracking: _lowerCAmelCase =0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step _lowerCAmelCase =accelerator.skip_first_batches(__UpperCamelCase , __UpperCamelCase ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader _lowerCAmelCase =train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. _lowerCAmelCase ={k: v.to(accelerator.device ) for k, v in batch.items()} _lowerCAmelCase =(batch["""image"""] - mean) / std _lowerCAmelCase =model(__UpperCamelCase ) _lowerCAmelCase =torch.nn.functional.cross_entropy(__UpperCamelCase , batch["""label"""] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(__UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(__UpperCamelCase , __UpperCamelCase ): _lowerCAmelCase =F'''step_{overall_step}''' if overall_step % checkpointing_steps == 0: if args.output_dir is not None: _lowerCAmelCase =os.path.join(args.output_dir , __UpperCamelCase ) accelerator.save_state(__UpperCamelCase ) model.eval() _lowerCAmelCase =0 _lowerCAmelCase =0 for step, batch in enumerate(__UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
_lowerCAmelCase ={k: v.to(accelerator.device ) for k, v in batch.items()} _lowerCAmelCase =(batch["""image"""] - mean) / std with torch.no_grad(): _lowerCAmelCase =model(__UpperCamelCase ) _lowerCAmelCase =outputs.argmax(dim=-1 ) _lowerCAmelCase , _lowerCAmelCase =accelerator.gather_for_metrics((predictions, batch["""label"""]) ) _lowerCAmelCase =predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() _lowerCAmelCase =accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' ) if args.with_tracking: accelerator.log( { """accuracy""": 100 * eval_metric, """train_loss""": total_loss.item() / len(__UpperCamelCase ), """epoch""": epoch, } , step=__UpperCamelCase , ) if checkpointing_steps == "epoch": _lowerCAmelCase =F'''epoch_{epoch}''' if args.output_dir is not None: _lowerCAmelCase =os.path.join(args.output_dir , __UpperCamelCase ) accelerator.save_state(__UpperCamelCase ) if args.with_tracking: accelerator.end_training() def _lowerCamelCase() -> Optional[Any]: _lowerCAmelCase =argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument("""--data_dir""" , required=__UpperCamelCase , help="""The data folder on disk.""" ) parser.add_argument("""--fp16""" , action="""store_true""" , help="""If passed, will use FP16 training.""" ) parser.add_argument( """--mixed_precision""" , type=__UpperCamelCase , default=__UpperCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) parser.add_argument( """--checkpointing_steps""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""" , ) parser.add_argument( """--output_dir""" , type=__UpperCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , ) parser.add_argument( """--project_dir""" , type=__UpperCamelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , ) _lowerCAmelCase =parser.parse_args() _lowerCAmelCase ={"""lr""": 3E-2, """num_epochs""": 3, """seed""": 42, """batch_size""": 64, """image_size""": 224} training_function(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": main()
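# A hedged illustration of the normalization used in the training loop above:
# instead of a torchvision Normalize transform, per-channel mean/std (taken
# from timm's `model.default_cfg` in the script) are broadcast over whole
# batches. The constants below are standard ImageNet statistics, used here as
# stand-ins.
import torch

batch = torch.rand(8, 3, 224, 224)  # fake batch of images scaled to [0, 1]
mean = torch.tensor([0.485, 0.456, 0.406])[None, :, None, None]
std = torch.tensor([0.229, 0.224, 0.225])[None, :, None, None]
normalized = (batch - mean) / std  # broadcasts over batch and spatial dims
print(normalized.shape)  # torch.Size([8, 3, 224, 224])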
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Swinv2ForImageClassification', 'Swinv2ForMaskedImageModeling', 'Swinv2Model', 'Swinv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import os from datetime import datetime as dt from github import Github __A = [ 'good first issue', 'good second issue', 'good difficult issue', 'enhancement', 'new pipeline/model', 'new scheduler', 'wip', ] def _lowerCamelCase() -> Optional[int]: _lowerCAmelCase =Github(os.environ["""GITHUB_TOKEN"""] ) _lowerCAmelCase =g.get_repo("""huggingface/diffusers""" ) _lowerCAmelCase =repo.get_issues(state="""open""" ) for issue in open_issues: _lowerCAmelCase =sorted(issue.get_comments() , key=lambda __UpperCamelCase : i.created_at , reverse=__UpperCamelCase ) _lowerCAmelCase =comments[0] if len(__UpperCamelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state="""closed""" ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state="""open""" ) issue.remove_from_labels("""stale""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) issue.add_to_labels("""stale""" ) if __name__ == "__main__": main()
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple: if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" ) _lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" ) _lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" ) _lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" ) _lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item _lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" ) _lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" ) _lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]: assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _lowerCAmelCase =old_checkpoint[path] _lowerCAmelCase =old_tensor.shape[0] // 3 _lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1) _lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3 _lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 ) _lowerCAmelCase =query.reshape(__UpperCamelCase ) _lowerCAmelCase =key.reshape(__UpperCamelCase ) _lowerCAmelCase =value.reshape(__UpperCamelCase ) for path in paths: _lowerCAmelCase =path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0] else: _lowerCAmelCase =old_checkpoint[path["""old"""]] def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase ={} _lowerCAmelCase =checkpoint["""time_embed.0.weight"""] _lowerCAmelCase =checkpoint["""time_embed.0.bias"""] _lowerCAmelCase =checkpoint["""time_embed.2.weight"""] _lowerCAmelCase =checkpoint["""time_embed.2.bias"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""] _lowerCAmelCase =checkpoint["""out.0.weight"""] _lowerCAmelCase =checkpoint["""out.0.bias"""] _lowerCAmelCase =checkpoint["""out.2.weight"""] _lowerCAmelCase =checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the middle blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the output blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } for i in range(1 , __UpperCamelCase ): _lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: _lowerCAmelCase =checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] _lowerCAmelCase =checkpoint[ 
F'''input_blocks.{i}.0.op.bias''' ] continue _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} _lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase ) if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''input_blocks.{i}.1''', """new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''input_blocks.{i}.1.qkv.bias''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , ) _lowerCAmelCase =middle_blocks[0] _lowerCAmelCase =middle_blocks[1] _lowerCAmelCase =middle_blocks[2] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase ) for i in range(__UpperCamelCase ): _lowerCAmelCase =i // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =i % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]] _lowerCAmelCase ={} for layer in output_block_layers: _lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__UpperCamelCase ) else: _lowerCAmelCase =[layer_name] if len(__UpperCamelCase ) > 1: _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(__UpperCamelCase ) == 2: _lowerCAmelCase =[] if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''output_blocks.{i}.1''', """new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''output_blocks.{i}.1.qkv.bias''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , ) else: _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] ) _lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] ) _lowerCAmelCase =checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') __A = parser.parse_args() __A = torch.load(args.checkpoint_path) with open(args.config_file) as f: __A = json.loads(f.read()) __A = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __A = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) __A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) __A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
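# A hedged, self-contained illustration of the qkv split performed in
# `assign_to_checkpoint` above: a fused (3*C, C) projection is reshaped per
# attention head and split into separate query/key/value tensors. The sizes
# below are made up.
import torch

num_heads, channels = 4, 64
old_tensor = torch.rand(3 * channels, channels)  # fused qkv weight
old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
query, key, value = old_tensor.split(channels // num_heads, dim=1)
print(query.reshape(-1, channels).shape)  # torch.Size([64, 64])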
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> bool: if not all(x.isalpha() for x in string ): raise ValueError("""String must only contain alphabetic characters.""" ) _lowerCAmelCase =sorted(string.lower() ) return len(__UpperCamelCase ) == len(set(__UpperCamelCase ) ) if __name__ == "__main__": __A = input('Enter a string ').strip() __A = is_isogram(input_str) print(F"""{input_str} is {"an" if isogram else "not an"} isogram.""")
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =0 _lowerCAmelCase =len(__UpperCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , __UpperCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: if len(__UpperCamelCase ) <= 1: return arr, 0 _lowerCAmelCase =len(__UpperCamelCase ) // 2 _lowerCAmelCase =arr[0:mid] _lowerCAmelCase =arr[mid:] _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =inversion_p + inversions_q + cross_inversions return c, num_inversions def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: _lowerCAmelCase =[] _lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0 while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__UpperCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__UpperCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _lowerCamelCase() -> str: _lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , __UpperCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) # an empty list should also have zero inversions _lowerCAmelCase =[] _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) if __name__ == "__main__": main()
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder __A = 'base_with_context' def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Dict: _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCamelCase ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCAmelCase =weights[F'''layers_{lyr_num}'''] _lowerCAmelCase =nn.Parameter( torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) ) _lowerCAmelCase =ly_weight["""attention"""] _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) ) return model def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[int]: _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCamelCase ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCAmelCase =weights[F'''layers_{lyr_num}'''] _lowerCAmelCase =ly_weight["""attention"""] _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) ) return model def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> str: _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) ) _lowerCAmelCase 
=nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCamelCase ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) ) for lyr_num, lyr in enumerate(model.decoders ): _lowerCAmelCase =weights[F'''layers_{lyr_num}'''] _lowerCAmelCase =nn.Parameter( torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) ) _lowerCAmelCase =ly_weight["""self_attention"""] _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _lowerCAmelCase =ly_weight["""MultiHeadDotProductAttention_0"""] _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) _lowerCAmelCase =nn.Parameter( torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) ) _lowerCAmelCase =nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) ) return model def _lowerCamelCase(__UpperCamelCase ) -> Optional[int]: _lowerCAmelCase =checkpoints.load_tax_checkpoint(args.checkpoint_path ) _lowerCAmelCase =jnp.tree_util.tree_map(onp.array , __UpperCamelCase ) _lowerCAmelCase =[ """from __gin__ import dynamic_registration""", """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""", """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""", """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""", ] _lowerCAmelCase =os.path.join(args.checkpoint_path , """..""" , """config.gin""" ) _lowerCAmelCase =inference.parse_training_gin_file(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =inference.InferenceModel(args.checkpoint_path , __UpperCamelCase ) _lowerCAmelCase =DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" ) _lowerCAmelCase =SpectrogramNotesEncoder( max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , 
dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , ) _lowerCAmelCase =SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , ) _lowerCAmelCase =TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) _lowerCAmelCase =load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , __UpperCamelCase ) _lowerCAmelCase =load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , __UpperCamelCase ) _lowerCAmelCase =load_decoder(ta_checkpoint["""target"""]["""decoder"""] , __UpperCamelCase ) _lowerCAmelCase =OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" ) _lowerCAmelCase =SpectrogramDiffusionPipeline( notes_encoder=__UpperCamelCase , continuous_encoder=__UpperCamelCase , decoder=__UpperCamelCase , scheduler=__UpperCamelCase , melgan=__UpperCamelCase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument( '--checkpoint_path', default=F"""{MODEL}/checkpoint_500000""", type=str, required=False, help='Path to the original jax model checkpoint.', ) __A = parser.parse_args() main(args)
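# A hedged illustration of the recurring `kernel.T` pattern in the conversion
# above: Flax stores dense kernels as (in_features, out_features), while
# torch.nn.Linear expects (out_features, in_features), hence the transpose.
# The sizes are made up.
import numpy as np
import torch
import torch.nn as nn

flax_kernel = np.random.rand(16, 32).astype("float32")  # (in, out) layout
linear = nn.Linear(16, 32, bias=False)
linear.weight = nn.Parameter(torch.from_numpy(flax_kernel.T))  # (out, in)
print(linear(torch.ones(1, 16)).shape)  # torch.Size([1, 32])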
"""simple docstring""" import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = True lowerCamelCase = None lowerCamelCase = 1 lowerCamelCase = None lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None def _lowerCAmelCase ( self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int: return int((input_a, input_a).count(1 ) != 0 ) def _lowerCamelCase() -> None: assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
"""simple docstring""" import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any: # load base model _lowerCAmelCase =StableDiffusionPipeline.from_pretrained(__UpperCamelCase , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors _lowerCAmelCase =load_file(__UpperCamelCase ) _lowerCAmelCase =[] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: _lowerCAmelCase =key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" ) _lowerCAmelCase =pipeline.text_encoder else: _lowerCAmelCase =key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" ) _lowerCAmelCase =pipeline.unet # find the target layer _lowerCAmelCase =layer_infos.pop(0 ) while len(__UpperCamelCase ) > -1: try: _lowerCAmelCase =curr_layer.__getattr__(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: _lowerCAmelCase =layer_infos.pop(0 ) elif len(__UpperCamelCase ) == 0: break except Exception: if len(__UpperCamelCase ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: _lowerCAmelCase =layer_infos.pop(0 ) _lowerCAmelCase =[] if "lora_down" in key: pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) ) pair_keys.append(__UpperCamelCase ) else: pair_keys.append(__UpperCamelCase ) pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: _lowerCAmelCase =state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) _lowerCAmelCase =state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(__UpperCamelCase , __UpperCamelCase ).unsqueeze(2 ).unsqueeze(3 ) else: _lowerCAmelCase =state_dict[pair_keys[0]].to(torch.floataa ) _lowerCAmelCase =state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(__UpperCamelCase , __UpperCamelCase ) # update visited list for item in pair_keys: visited.append(__UpperCamelCase ) return pipeline if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.' ) parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument( '--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors' ) parser.add_argument( '--lora_prefix_text_encoder', default='lora_te', type=str, help='The prefix of text encoder weight in safetensors', ) parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW') parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.' ) parser.add_argument('--device', type=str, help='Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)') __A = parser.parse_args() __A = args.base_model_path __A = args.checkpoint_path __A = args.dump_path __A = args.lora_prefix_unet __A = args.lora_prefix_text_encoder __A = args.alpha __A = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) __A = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
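# A hedged, minimal illustration of the merge rule applied above,
# W = W0 + alpha * (lora_up @ lora_down), for a plain linear weight.
# The dimensions and alpha are made-up values.
import torch

d, r = 32, 4  # hidden size and LoRA rank
w0 = torch.rand(d, d)
lora_up, lora_down = torch.rand(d, r), torch.rand(r, d)
merged = w0 + 0.75 * torch.mm(lora_up, lora_down)
print(merged.shape)  # torch.Size([32, 32])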
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py __A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' __A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' __A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple: _lowerCAmelCase =compute_bleu( reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
341
0
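The BLEU wrapper above delegates to compute_bleu from the TensorFlow NMT scripts. A minimal usage sketch, assuming the (since deprecated) datasets.load_metric entry point is still available; the tokens and expected score mirror the docstring example:

import datasets

predictions = [["hello", "there", "general", "kenobi"]]       # one tokenized hypothesis
references = [[["hello", "there", "general", "kenobi"]]]      # list of references per sample
bleu = datasets.load_metric("bleu")
results = bleu.compute(predictions=predictions, references=references, max_order=4, smooth=False)
print(results["bleu"])  # 1.0 for an exact match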
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''gpt_neox''' def __init__( self , __UpperCAmelCase=5_04_32 , __UpperCAmelCase=61_44 , __UpperCAmelCase=44 , __UpperCAmelCase=64 , __UpperCAmelCase=2_45_76 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.2_5 , __UpperCAmelCase=1_00_00 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Tuple: super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) _lowerCAmelCase =vocab_size _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =hidden_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =intermediate_size _lowerCAmelCase =hidden_act _lowerCAmelCase =rotary_pct _lowerCAmelCase =rotary_emb_base _lowerCAmelCase =attention_dropout _lowerCAmelCase =hidden_dropout _lowerCAmelCase =classifier_dropout _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps _lowerCAmelCase =use_cache _lowerCAmelCase =tie_word_embeddings _lowerCAmelCase =use_parallel_residual _lowerCAmelCase =rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( """The hidden size is not divisble by the number of attention heads! Make sure to update them!""" ) def __UpperCAmelCase ( self ) -> Optional[int]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'''got {self.rope_scaling}''' ) _lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase ) _lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
360
"""simple docstring""" import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=512, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def _lowerCamelCase(__UpperCamelCase ) -> List[str]: if string == "True": return True elif string == "False": return False else: raise ValueError(F'''could not parse string as bool {string}''' ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) __A = parser.parse_args() __A = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
341
0
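The `_rope_scaling_validation` hook in the GPT-NeoX config above accepts only a two-field dict whose `type` is 'linear' or 'dynamic' and whose `factor` is greater than 1. A hedged sketch of both paths, assuming the released transformers GPTNeoXConfig (the class name in the sample itself is obfuscated):

from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
try:
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})       # factor must be > 1
except ValueError as err:
    print(err)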
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str: if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(__UpperCamelCase , n - 1 , __UpperCamelCase ) * a) % mod else: _lowerCAmelCase =binary_exponentiation(__UpperCamelCase , n / 2 , __UpperCamelCase ) return (b * b) % mod # a prime number __A = 701 __A = 10_0000_0000 __A = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
361
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __A = { 'configuration_audio_spectrogram_transformer': [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ASTConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ASTForAudioClassification', 'ASTModel', 'ASTPreTrainedModel', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['ASTFeatureExtractor'] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
0
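The modular-division identity printed by the binary-exponentiation sample above rests on Fermat's little theorem: for prime p and b coprime to p, b ** (p - 2) is the inverse of b modulo p. A self-contained check using only Python's built-in pow:

p, a, b = 701, 1_000_000_000, 10
inverse = pow(b, p - 2, p)                # Fermat inverse, O(log p) multiplications
assert (a // b) % p == (a * inverse) % p  # a / b (mod p) via the inverse
assert inverse == pow(b, -1, p)           # Python 3.8+ computes the same inverse directly
print("modular inverse identities hold")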
"""simple docstring""" from __future__ import annotations import os from typing import Any import requests __A = 'https://api.github.com' # https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user __A = BASE_URL + '/user' # https://github.com/settings/tokens __A = os.environ.get('USER_TOKEN', '') def _lowerCamelCase(__UpperCamelCase ) -> dict[Any, Any]: _lowerCAmelCase ={ """Authorization""": F'''token {auth_token}''', """Accept""": """application/vnd.github.v3+json""", } return requests.get(__UpperCamelCase , headers=__UpperCamelCase ).json() if __name__ == "__main__": # pragma: no cover if USER_TOKEN: for key, value in fetch_github_info(USER_TOKEN).items(): print(F"""{key}: {value}""") else: raise ValueError('\'USER_TOKEN\' field cannot be empty.')
362
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { 'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'], 'tokenization_m2m_100': ['M2M100Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST', 'M2M100ForConditionalGeneration', 'M2M100Model', 'M2M100PreTrainedModel', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
0
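The user-info fetch above is a single authenticated GET against the GitHub REST v3 API. A minimal sketch with a timeout and an explicit status check added; it assumes a personal access token in the USER_TOKEN environment variable:

import os

import requests

token = os.environ.get("USER_TOKEN", "")
headers = {"Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json"}
response = requests.get("https://api.github.com/user", headers=headers, timeout=10)
response.raise_for_status()  # fail loudly on bad credentials
print(response.json().get("login"))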
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]: _lowerCAmelCase =0 if start < end: _lowerCAmelCase =randint(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =a[end] _lowerCAmelCase =a[pivot] _lowerCAmelCase =temp _lowerCAmelCase , _lowerCAmelCase =_in_place_partition(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) count += _in_place_quick_sort(__UpperCamelCase , __UpperCamelCase , p - 1 ) count += _in_place_quick_sort(__UpperCamelCase , p + 1 , __UpperCamelCase ) return count def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple: _lowerCAmelCase =0 _lowerCAmelCase =randint(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =a[end] _lowerCAmelCase =a[pivot] _lowerCAmelCase =temp _lowerCAmelCase =start - 1 for index in range(__UpperCamelCase , __UpperCamelCase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _lowerCAmelCase =new_pivot_index + 1 _lowerCAmelCase =a[new_pivot_index] _lowerCAmelCase =a[index] _lowerCAmelCase =temp _lowerCAmelCase =a[new_pivot_index + 1] _lowerCAmelCase =a[end] _lowerCAmelCase =temp return new_pivot_index + 1, count __A = TemporaryFile() __A = 100 # 1000 elements are to be sorted __A , __A = 0, 1 # mean and standard deviation __A = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array __A = np.load(outfile) __A = len(M) - 1 __A = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal distribution' 'is :' ) print(z)
363
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __A = datasets.logging.get_logger(__name__) __A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' __A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' __A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict: _lowerCAmelCase ={doc: key_lines} _lowerCAmelCase ={doc: sys_lines} _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) if remove_nested: _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' 
"""files, respectively""" ) return doc_coref_infos def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: _lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 for name, metric in metrics: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _lowerCAmelCase =(conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''' ) output_scores.update({"""conll_score""": conll} ) return output_scores def _lowerCamelCase(__UpperCamelCase ) -> Tuple: _lowerCAmelCase =False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: _lowerCAmelCase =line.split()[5] if not parse_col == "-": _lowerCAmelCase =True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]: _lowerCAmelCase =[ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _lowerCAmelCase =evaluate( key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , ) return score
341
0
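For reference, a compact de-obfuscated version of the random-pivot, in-place quicksort above; the names here are illustrative only, and the comparison counter follows the same bookkeeping as the sample:

from random import randint

def quick_sort_count(a, start, end):
    """Sort a[start:end + 1] in place; return the number of comparisons made."""
    if start >= end:
        return 0
    pivot = randint(start, end)
    a[pivot], a[end] = a[end], a[pivot]  # move the pivot value to the end
    boundary, count = start - 1, 0
    for i in range(start, end):
        count += 1
        if a[i] < a[end]:                # current value smaller than the pivot
            boundary += 1
            a[boundary], a[i] = a[i], a[boundary]
    a[boundary + 1], a[end] = a[end], a[boundary + 1]
    p = boundary + 1                     # final pivot position
    return count + quick_sort_count(a, start, p - 1) + quick_sort_count(a, p + 1, end)

data = [5, 2, 9, 1, 7]
comparisons = quick_sort_count(data, 0, len(data) - 1)
print(comparisons, data)                 # count varies with the random pivots; data is now sorted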
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __A = { 'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST', 'FalconForCausalLM', 'FalconModel', 'FalconPreTrainedModel', 'FalconForSequenceClassification', 'FalconForTokenClassification', 'FalconForQuestionAnswering', ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
364
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = XGLMConfig lowerCamelCase = {} lowerCamelCase = '''gelu''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_input_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =ffn_dim _lowerCAmelCase =activation_function _lowerCAmelCase =activation_dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =initializer_range _lowerCAmelCase =None _lowerCAmelCase =0 _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Dict: return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _lowerCAmelCase =None if self.use_input_mask: _lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase =self.get_config() _lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self ) -> str: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) =config_and_inputs _lowerCAmelCase ={ """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': 
TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =TFXGLMModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 ) def _lowerCAmelCase ( self ) -> int: self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def _lowerCAmelCase ( self ) -> Union[str, Any]: super().test_resize_token_embeddings() @require_tf class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) _lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) _lowerCAmelCase =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] ) _lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase ="""left""" # use different length sentences to test batching _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase ) _lowerCAmelCase =inputs["""input_ids"""] _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
341
0
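The Falcon and audio-spectrogram import files above both rely on transformers' _LazyModule: submodules are declared as strings and only imported when an attribute is first accessed. A simplified, self-contained sketch of the idea (the real implementation also handles relative imports and module specs); the module and attribute names below are placeholders:

import importlib
from types import ModuleType

class LazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the module that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._attr_to_module[attr])  # deferred import
        return getattr(module, attr)

lazy = LazyModule("lazy_math", {"math": ["sqrt", "pi"]})
print(lazy.sqrt(2.0), lazy.pi)  # math is imported only on this first access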
"""simple docstring""" from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar __A = TypeVar('T') def _lowerCamelCase(__UpperCamelCase ) -> int: return (position - 1) // 2 def _lowerCamelCase(__UpperCamelCase ) -> int: return (2 * position) + 1 def _lowerCamelCase(__UpperCamelCase ) -> int: return (2 * position) + 2 class lowerCamelCase__ ( Generic[T] ): '''simple docstring''' def __init__( self ) -> None: _lowerCAmelCase =[] _lowerCAmelCase ={} _lowerCAmelCase =0 def __len__( self ) -> int: return self.elements def __repr__( self ) -> str: return str(self.heap ) def _lowerCAmelCase ( self ) -> bool: # Check if the priority queue is empty return self.elements == 0 def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) _lowerCAmelCase =self.elements self.elements += 1 self._bubble_up(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) _lowerCAmelCase , _lowerCAmelCase =self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: _lowerCAmelCase , _lowerCAmelCase =self.heap[0] self._bubble_down(__UpperCAmelCase ) return elem def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: # Update the weight of the given key _lowerCAmelCase =self.position_map[elem] _lowerCAmelCase =(elem, weight) if position > 0: _lowerCAmelCase =get_parent_position(__UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase =self.heap[parent_position] if parent_weight > weight: self._bubble_up(__UpperCAmelCase ) else: self._bubble_down(__UpperCAmelCase ) else: self._bubble_down(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] _lowerCAmelCase =self.position_map[elem] if curr_pos == 0: return None _lowerCAmelCase =get_parent_position(__UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase =self.heap[curr_pos] _lowerCAmelCase , _lowerCAmelCase =self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__UpperCAmelCase , __UpperCAmelCase ) return self._bubble_up(__UpperCAmelCase ) return None def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] _lowerCAmelCase =self.position_map[elem] _lowerCAmelCase , _lowerCAmelCase =self.heap[curr_pos] _lowerCAmelCase =get_child_left_position(__UpperCAmelCase ) _lowerCAmelCase =get_child_right_position(__UpperCAmelCase ) if child_left_position < self.elements and child_right_position < self.elements: _lowerCAmelCase , _lowerCAmelCase =self.heap[child_left_position] _lowerCAmelCase , _lowerCAmelCase =self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__UpperCAmelCase , __UpperCAmelCase ) return self._bubble_down(__UpperCAmelCase ) if child_left_position < self.elements: _lowerCAmelCase , _lowerCAmelCase =self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__UpperCAmelCase , __UpperCAmelCase ) return self._bubble_down(__UpperCAmelCase ) else: return None if child_right_position < self.elements: _lowerCAmelCase , _lowerCAmelCase =self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__UpperCAmelCase , __UpperCAmelCase ) return 
self._bubble_down(__UpperCAmelCase ) return None def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: # Swap the nodes at the given positions _lowerCAmelCase =self.heap[nodea_pos][0] _lowerCAmelCase =self.heap[nodea_pos][0] _lowerCAmelCase , _lowerCAmelCase =( self.heap[nodea_pos], self.heap[nodea_pos], ) _lowerCAmelCase =nodea_pos _lowerCAmelCase =nodea_pos class lowerCamelCase__ ( Generic[T] ): '''simple docstring''' def __init__( self ) -> None: _lowerCAmelCase ={} _lowerCAmelCase =0 def __repr__( self ) -> str: return str(self.connections ) def __len__( self ) -> int: return self.nodes def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: _lowerCAmelCase ={} self.nodes += 1 def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__UpperCAmelCase ) self.add_node(__UpperCAmelCase ) _lowerCAmelCase =weight _lowerCAmelCase =weight def _lowerCamelCase(__UpperCamelCase , ) -> tuple[dict[T, int], dict[T, T | None]]: _lowerCAmelCase ={node: maxsize for node in graph.connections} _lowerCAmelCase ={node: None for node in graph.connections} _lowerCAmelCase =MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(__UpperCamelCase , __UpperCamelCase ) if priority_queue.is_empty(): return dist, parent # initialization _lowerCAmelCase =priority_queue.extract_min() _lowerCAmelCase =0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _lowerCAmelCase =dist[node] + graph.connections[node][neighbour] priority_queue.update_key(__UpperCamelCase , dist[neighbour] ) _lowerCAmelCase =node # running prim's algorithm while not priority_queue.is_empty(): _lowerCAmelCase =priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _lowerCAmelCase =dist[node] + graph.connections[node][neighbour] priority_queue.update_key(__UpperCamelCase , dist[neighbour] ) _lowerCAmelCase =node return dist, parent
365
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A = logging.get_logger(__name__) __A = {'vocab_file': 'spiece.model'} __A = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } __A = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) __A = 0 __A = 1 __A = 2 __A = 3 __A = 4 class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = '''left''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =3 _lowerCAmelCase =do_lower_case _lowerCAmelCase =remove_space _lowerCAmelCase =keep_accents _lowerCAmelCase =vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> str: return len(self.sp_model ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[int]: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None return state def __setstate__( self , __UpperCAmelCase ) -> Tuple: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: if self.remove_space: _lowerCAmelCase =""" """.join(inputs.strip().split() ) else: _lowerCAmelCase =inputs _lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase ) _lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] ) if self.do_lower_case: _lowerCAmelCase =outputs.lower() return outputs def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: 
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase ) _lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) _lowerCAmelCase =[] for piece in pieces: if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCAmelCase =cur_pieces[1:] else: _lowerCAmelCase =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__UpperCAmelCase ) else: new_pieces.append(__UpperCAmelCase ) return new_pieces def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: return self.sp_model.PieceToId(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.IdToPiece(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str: _lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase ) _lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 _lowerCAmelCase =[] _lowerCAmelCase =[] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) _lowerCAmelCase =[] sub_texts.append(__UpperCAmelCase ) else: current_sub_text.append(__UpperCAmelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens _lowerCAmelCase ="""""".join(__UpperCAmelCase ) _lowerCAmelCase =( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase ) return clean_text else: return text def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] return ([0] * len(__UpperCAmelCase )) + [1, 1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowerCAmelCase ( self , __UpperCAmelCase , 
__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
341
0
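The Prim's implementation above maintains a decrease-key priority queue by hand. An equivalent, self-contained sketch using heapq with lazy deletion instead of update_key; the names are illustrative only:

import heapq

def prims_mst(graph, start):
    """graph: {node: {neighbour: weight}}; returns the total MST weight."""
    visited, total = {start}, 0
    heap = [(w, v) for v, w in graph[start].items()]
    heapq.heapify(heap)
    while heap and len(visited) < len(graph):
        weight, node = heapq.heappop(heap)
        if node in visited:
            continue  # stale entry: lazy deletion
        visited.add(node)
        total += weight
        for nxt, w in graph[node].items():
            if nxt not in visited:
                heapq.heappush(heap, (w, nxt))
    return total

g = {"a": {"b": 1, "c": 4}, "b": {"a": 1, "c": 2}, "c": {"a": 4, "b": 2}}
print(prims_mst(g, "a"))  # 3 (edges a-b and b-c)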
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> list: _lowerCAmelCase =len(__UpperCamelCase ) _lowerCAmelCase =[] for i in range(len(__UpperCamelCase ) - pat_len + 1 ): _lowerCAmelCase =True for j in range(__UpperCamelCase ): if s[i + j] != pattern[j]: _lowerCAmelCase =False break if match_found: position.append(__UpperCamelCase ) return position if __name__ == "__main__": assert naive_pattern_search('ABCDEFG', 'DE') == [3] print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
366
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase ) -> bool: _lowerCAmelCase =str(__UpperCamelCase ) return n == n[::-1] def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> str: _lowerCAmelCase =0 for i in range(1 , __UpperCamelCase ): if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
341
0
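A quick spot check of the double-base palindrome predicate used in the final sample; 585 is the classic example for this problem (Project Euler 36), palindromic in both base 10 and base 2:

def is_palindrome(s: str) -> bool:
    return s == s[::-1]

n = 585  # 585 = 0b1001001001
print(is_palindrome(str(n)), is_palindrome(bin(n)[2:]))  # True True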